| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    # Count how many n-digit positive integers are also an nth power.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }") | 86 |
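# Hedged sanity check (assumption, not stated in the snippet itself: this
# implements Project Euler problem 63, whose published answer is 49):
#     solution()  # -> 49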
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # TensorFlow reports `sparse_categorical_accuracy` and `loss`; pair each
            # metric name with the regex that actually matches it.
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class')
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework) | 86 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    # logistic sigmoid, applied elementwise: 1 / (1 + exp(-x))
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish activation, applied elementwise: x * sigmoid(x)
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 |
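# Minimal usage sketch for the two activations above (output formatting assumes
# NumPy's default float printing):
#     sigmoid(np.array([0.0]))              # -> array([0.5])
#     sigmoid_linear_unit(np.array([0.0]))  # -> array([0.])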
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal; greedy best-first uses it directly as f.
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
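# Worked example of the Manhattan heuristic used above: a node at (0, 0) heading
# for goal (6, 6) gets f_cost = |0 - 6| + |0 - 6| = 12:
#     Node(0, 0, 6, 6, 0, None).f_cost  # -> 12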
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 86 |
"""simple docstring"""
def normality(moles: float, volume: float, nfactor: float) -> float:
    # normality = molarity (moles / volume) scaled by the n-factor
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> float:
    # ideal gas law PV = nRT with R = 0.0821 L*atm/(mol*K), solved for P
    return round(float((moles * 0.0821 * temperature) / volume))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> float:
    # ideal gas law solved for V
    return round(float((moles * 0.0821 * temperature) / pressure))


def temperature_of_gas_system(pressure: float, volume: float, moles: float) -> float:
    # ideal gas law solved for T
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 | 1 |
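# Worked example under the ideal gas law above (R = 0.0821 L*atm/(mol*K)):
# 2 mol at 100 K in 5 L gives round(2 * 0.0821 * 100 / 5) = round(3.284) = 3 atm:
#     pressure_of_gas_system(2, 100, 5)  # -> 3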
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt'
        )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result | 86 |
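# Hypothetical usage sketch for the pipeline above (the checkpoint name is an
# illustrative assumption, not taken from this file):
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])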
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING
        )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.'
                )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text')
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0)
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['max_length'] = generate_kwargs.get('max_length') or self.model.config.max_length
                generate_kwargs['max_length'] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs['min_length'] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record)
        return records | 86 | 1 |
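# Hypothetical usage sketch (the checkpoint name is illustrative); `return_full_text`
# and `max_new_tokens` flow through the `_sanitize_parameters` logic above:
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)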
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = 'vivit'

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs) | 86 |
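# Minimal sketch: the config instantiates with the defaults shown above
#     config = VivitConfig()
#     config.num_frames, config.tubelet_size  # -> 32, [2, 16, 16]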
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    # leftmost insertion point that keeps `sorted_collection` sorted
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    # rightmost insertion point (after any run of equal items)
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
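# Quick contrast of the two insertion-point helpers above on a run of duplicates:
#     bisect_left([0, 5, 5, 7, 10], 5)   # -> 1 (before the equal run)
#     bisect_right([0, 5, 5, 7, 10], 5)  # -> 3 (after the equal run)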
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, 'rb') as f:
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 86 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)
    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410) | 86 | 1 |
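# The cross-framework round trip exercised above, in brief (the checkpoint name
# is illustrative):
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF model
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PT model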
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    # Convert a base-10 integer to its octal (base-8) string representation.
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print('\n2 in octal is:')
    print(decimal_to_octal(2))  # = 2
    print('\n8 in octal is:')
    print(decimal_to_octal(8))  # = 10
    print('\n65 in octal is:')
    print(decimal_to_octal(65))  # = 101
    print('\n216 in octal is:')
    print(decimal_to_octal(216))  # = 330
    print('\n512 in octal is:')
    print(decimal_to_octal(512))  # = 1000
    print('\n')


if __name__ == "__main__":
    main() | 86 |
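# Trace of decimal_to_octal(65): 65 % 8 = 1, then 8 % 8 = 0, then 1 % 8 = 1,
# accumulating digits 1, 0, 1 into base-10 positions -> "0o101" (65 = 64 + 1).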
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
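            # classifier-free guidance: with a variance-learning scheduler the UNet output stacks
            # the noise prediction and extra variance channels, which are split apart below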
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCamelCase__ = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = """cpu"""
lowerCamelCase__ = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
lowerCamelCase__ = """path-to-your-trained-model"""
lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCamelCase__ = pipe.to(device)
# to channels last
lowerCamelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCamelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCamelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCamelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCamelCase__ = torch.randn(2, 4, 64, 64)
lowerCamelCase__ = torch.rand(1) * 999
lowerCamelCase__ = torch.randn(2, 77, 768)
lowerCamelCase__ = (sample, timestep, encoder_hidden_status)
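# dummy inputs shaped like a real UNet call (latents, timestep, text embeddings) let IPEX trace the graph;
# if tracing with a sample input fails, fall back to plain operator-level optimization below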
try:
lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCamelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCamelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCamelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCamelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCamelCase__ = 666
lowerCamelCase__ = torch.Generator(device).manual_seed(seed)
lowerCamelCase__ = {"""generator""": generator}
if args.steps is not None:
lowerCamelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCamelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""") | 86 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
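        # persist the tokenizer twice: once in the default layout and once with an explicit legacy_format,
        # so both the sentencepiece-backed and the fast tokenizer can reload from tmpdirname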
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( _lowerCamelCase):
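    # minimal CLI-command contract: register an argparse subparser, then implement the command body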
@staticmethod
@abstractmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
raise NotImplementedError()
@abstractmethod
def __lowerCamelCase ( self ):
raise NotImplementedError() | 86 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
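    # ONNX export config for PoolFormer: declares the dynamic input axes, the validation tolerance,
    # and the minimum torch version known to produce a valid export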
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3 | 86 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCamelCase__ = logging.getLogger(__name__)
class A__ ( _lowerCamelCase):
A_ : str = 'sequence-classification'
def __init__( self , _SCREAMING_SNAKE_CASE ):
        if isinstance(_SCREAMING_SNAKE_CASE , dict ):
__lowerCAmelCase : Dict = Namespace(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = glue_output_modes[hparams.task]
__lowerCAmelCase : Optional[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.mode )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return self.model(**_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__lowerCAmelCase : List[str] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__lowerCAmelCase : Dict = self(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = outputs[0]
__lowerCAmelCase : Union[str, Any] = self.trainer.lr_schedulers[0]['scheduler']
__lowerCAmelCase : Optional[Any] = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.hparams
__lowerCAmelCase : int = processors[args.task]()
__lowerCAmelCase : Any = processor.get_labels()
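        # tokenize each split once and cache the features on disk; later runs reload the cache
        # unless --overwrite_cache is passed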
for mode in ["train", "dev"]:
__lowerCAmelCase : str = self._feature_file(_SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , _SCREAMING_SNAKE_CASE )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
__lowerCAmelCase : Dict = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
__lowerCAmelCase : List[str] = convert_examples_to_features(
_SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , _SCREAMING_SNAKE_CASE )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
__lowerCAmelCase : int = 'dev' if mode == 'test' else mode
__lowerCAmelCase : Any = self._feature_file(_SCREAMING_SNAKE_CASE )
logger.info('Loading features from cached file %s' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__lowerCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__lowerCAmelCase : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__lowerCAmelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__lowerCAmelCase : int = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__lowerCAmelCase : str = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__lowerCAmelCase : List[Any] = self(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = outputs[:2]
__lowerCAmelCase : int = logits.detach().cpu().numpy()
__lowerCAmelCase : Any = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
__lowerCAmelCase : str = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__lowerCAmelCase : Tuple = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__lowerCAmelCase : List[str] = np.squeeze(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = np.concatenate([x['target'] for x in outputs] , axis=0 )
__lowerCAmelCase : str = [[] for _ in range(out_label_ids.shape[0] )]
__lowerCAmelCase : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
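        # merge the averaged validation loss with the GLUE metric(s) computed for this task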
__lowerCAmelCase : List[Any] = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
__lowerCAmelCase : str = dict(results.items() )
__lowerCAmelCase : List[Any] = results
return ret, preds_list, out_label_list
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self._eval_end(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._eval_end(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
BaseTransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=_SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=_SCREAMING_SNAKE_CASE , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def __lowerCAmelCase ():
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
add_generic_args(_UpperCamelCase , os.getcwd() )
__lowerCAmelCase : List[str] = GLUETransformer.add_model_specific_args(_UpperCamelCase , os.getcwd() )
__lowerCAmelCase : Tuple = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__lowerCAmelCase : Dict = os.path.join(
'./results' , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
__lowerCAmelCase : List[str] = GLUETransformer(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = generic_train(_UpperCamelCase , _UpperCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__lowerCAmelCase : List[str] = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=_UpperCamelCase ) )
__lowerCAmelCase : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCamelCase )
if __name__ == "__main__":
main() | 86 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
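        # a tiny class-conditional transformer (AdaLN-Zero norm, 1000 class embeddings) keeps this CPU test fast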
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , _SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
__lowerCAmelCase : Any = self.model.config
else:
__lowerCAmelCase : int = config
__lowerCAmelCase : Any = data_args
__lowerCAmelCase : int = self.config.tgt_vocab_size if isinstance(self.config , _SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                ' padding.' )
if self.args.label_smoothing == 0:
__lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__lowerCAmelCase : int = label_smoothed_nll_loss
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.optimizer is None:
__lowerCAmelCase : Optional[int] = ['bias', 'LayerNorm.weight']
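            # group parameters so that biases and LayerNorm weights receive zero weight decay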
__lowerCAmelCase : Dict = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
__lowerCAmelCase : List[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__lowerCAmelCase : int = Adafactor
__lowerCAmelCase : List[Any] = {'scale_parameter': False, 'relative_step': False}
else:
__lowerCAmelCase : Any = AdamW
__lowerCAmelCase : int = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
__lowerCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
__lowerCAmelCase : Optional[Any] = OSS(
params=_SCREAMING_SNAKE_CASE , optim=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
else:
__lowerCAmelCase : int = optimizer_cls(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__lowerCAmelCase : Any = self._get_lr_scheduler(_SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__lowerCAmelCase : Union[str, Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__lowerCAmelCase : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__lowerCAmelCase : List[str] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE )
return scheduler
def __lowerCamelCase ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Optional[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__lowerCAmelCase : int = model(**_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Optional[int] = torch.nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = inputs.pop('labels' )
__lowerCAmelCase , __lowerCAmelCase : Any = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return loss
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : Tuple = self._prepare_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__lowerCAmelCase : Tuple = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **_SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__lowerCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] )
__lowerCAmelCase : Any = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self._compute_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__lowerCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__lowerCAmelCase : Tuple = self._pad_tensors_to_max_len(_SCREAMING_SNAKE_CASE , gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# If PAD token is not defined at least EOS token has to be defined
__lowerCAmelCase : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
f" padded to `max_length`={max_length}" )
__lowerCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__lowerCAmelCase : Dict = tensor
return padded_tensor | 86 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
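        # second-order Heun sampler with Karras sigmas; prediction_type="sample" means the prior
        # predicts the clean sample directly rather than the added noise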
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))""")) | 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCamelCase__ = random.Random()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ):
if rng is None:
__lowerCAmelCase : int = global_rng
__lowerCAmelCase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=20_00 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1_60_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ):
__lowerCAmelCase : List[Any] = parent
__lowerCAmelCase : str = batch_size
__lowerCAmelCase : Union[str, Any] = min_seq_length
__lowerCAmelCase : Union[str, Any] = max_seq_length
__lowerCAmelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : List[Any] = feature_size
__lowerCAmelCase : str = num_mel_bins
__lowerCAmelCase : Union[str, Any] = padding_value
__lowerCAmelCase : Optional[int] = sampling_rate
__lowerCAmelCase : Dict = return_attention_mask
__lowerCAmelCase : Tuple = do_normalize
def __lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
def _flatten(_SCREAMING_SNAKE_CASE ):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) )
if equal_length:
__lowerCAmelCase : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : str = [np.asarray(_SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[Any] = SpeechaTextFeatureExtractor if is_speech_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = SpeechaTextFeatureExtractionTester(self )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
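        # utterance-level CMVN check: every feature dimension should be near zero mean and unit variance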
self.assertTrue(np.all(np.mean(_SCREAMING_SNAKE_CASE , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1E-3 ) )
def __lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : Union[str, Any] = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : str = feature_extractor(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__lowerCAmelCase : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test batched
__lowerCAmelCase : List[Any] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features
__lowerCAmelCase : Tuple = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__lowerCAmelCase : Optional[Any] = np.asarray(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features
__lowerCAmelCase : str = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : List[Any] = ['longest', 'max_length', 'do_not_pad']
__lowerCAmelCase : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = feature_extractor(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = inputs.input_features
__lowerCAmelCase : List[Any] = inputs.attention_mask
__lowerCAmelCase : Union[str, Any] = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : List[str] = ['longest', 'max_length', 'do_not_pad']
__lowerCAmelCase : Any = [None, 16, None]
for max_length, padding in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = feature_extractor(
_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = inputs.input_features
__lowerCAmelCase : str = inputs.attention_mask
__lowerCAmelCase : Union[str, Any] = [np.sum(_SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
        self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : Optional[int] = feature_extractor(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : int = inputs.input_features
__lowerCAmelCase : Optional[Any] = inputs.attention_mask
__lowerCAmelCase : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : int = feature_extractor(
_SCREAMING_SNAKE_CASE , padding='longest' , max_length=4 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[Any] = inputs.input_features
__lowerCAmelCase : List[Any] = inputs.attention_mask
__lowerCAmelCase : Optional[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__lowerCAmelCase : Optional[int] = feature_extractor(
_SCREAMING_SNAKE_CASE , padding='longest' , max_length=16 , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[Any] = inputs.input_features
__lowerCAmelCase : Any = inputs.attention_mask
__lowerCAmelCase : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape , (3, 6, 24) )
def __lowerCamelCase ( self ):
import torch
__lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Tuple = np.random.rand(1_00 , 32 ).astype(np.floataa )
__lowerCAmelCase : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCAmelCase : Optional[int] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
from datasets import load_dataset
__lowerCAmelCase : str = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__lowerCAmelCase : Union[str, Any] = ds.sort('id' ).select(range(_SCREAMING_SNAKE_CASE ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : Tuple = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__lowerCAmelCase : List[str] = self._load_datasamples(1 )
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = feature_extractor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) | 86 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
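    # DP table over 0..number: entry i is meant to hold the minimal count of perfect squares summing to i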
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCAmelCase (_UpperCamelCase ):
with open(_UpperCamelCase , 'rb' ) as f:
__lowerCAmelCase : Optional[int] = Image.open(_UpperCamelCase )
return im.convert('RGB' )
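# Note: forcing .convert('RGB') in the loader above normalizes grayscale,
# palette, and RGBA images to three channels, so every input reaches the
# torchvision transforms with a consistent channel count.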
@dataclass
class A__ :
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'})
A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'})
A_ : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'})
A_ : Optional[int] = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
A_ : Optional[int] = field(
default=_lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __lowerCamelCase ( self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class A__ :
A_ : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCamelCase)} , )
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
A_ : Optional[str] = field(
default=_lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
A_ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
A_ : str = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
A_ : bool = field(
default=_lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
A_ : bool = field(
default=_lowerCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = torch.stack([example['pixel_values'] for example in examples] )
__lowerCAmelCase : Dict = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase : str = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__lowerCAmelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__lowerCAmelCase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowerCAmelCase : Union[str, Any] = {}
if data_args.train_dir is not None:
__lowerCAmelCase : Dict = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
__lowerCAmelCase : Union[str, Any] = os.path.join(data_args.validation_dir , '**' )
__lowerCAmelCase : Optional[int] = load_dataset(
'imagefolder' , data_files=_UpperCamelCase , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowerCAmelCase : Union[str, Any] = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _UpperCamelCase ) and data_args.train_val_split > 0.0:
__lowerCAmelCase : Tuple = dataset['train'].train_test_split(data_args.train_val_split )
__lowerCAmelCase : List[str] = split['train']
__lowerCAmelCase : Dict = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowerCAmelCase : Any = dataset['train'].features['labels'].names
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = {}, {}
for i, label in enumerate(_UpperCamelCase ):
__lowerCAmelCase : Any = str(_UpperCamelCase )
__lowerCAmelCase : Tuple = label
# Load the accuracy metric from the datasets package
__lowerCAmelCase : Optional[Any] = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(_UpperCamelCase ):
return metric.compute(predictions=np.argmax(_UpperCamelCase.predictions , axis=1 ) , references=_UpperCamelCase.label_ids )
__lowerCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Optional[int] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__lowerCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowerCAmelCase : List[Any] = image_processor.size['shortest_edge']
else:
__lowerCAmelCase : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
__lowerCAmelCase : List[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowerCAmelCase : Tuple = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowerCAmelCase : Union[str, Any] = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_UpperCamelCase ):
__lowerCAmelCase : List[str] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowerCAmelCase : List[str] = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowerCAmelCase : List[Any] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
# Initialize our trainer
__lowerCAmelCase : Optional[Any] = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
__lowerCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase : List[str] = last_checkpoint
__lowerCAmelCase : Dict = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowerCAmelCase : Tuple = trainer.evaluate()
trainer.log_metrics('eval' , _UpperCamelCase )
trainer.save_metrics('eval' , _UpperCamelCase )
# Write model card and (optionally) push to hub
__lowerCAmelCase : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main() | 86 |
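# Illustrative usage sketch (hypothetical dataset name, paths, and values):
# scripts built on HfArgumentParser like the one above are driven from the
# command line, with each flag mapped onto a field of the argument dataclasses
# parsed at the top of main(). For example:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-finetuned \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --num_train_epochs 3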
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 1 |
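# Illustrative sketch: the cross-framework checks above all reduce to running
# the same weights through Flax and PyTorch and comparing the last token's
# logits within a tolerance. The comparison primitive in isolation
# (hypothetical standalone version of assert_almost_equals):
import numpy as np

def assert_almost_equals(a: np.ndarray, b: np.ndarray, tol: float) -> None:
    diff = np.abs(a - b).max()
    assert diff < tol, f'Max diff is {diff}'

assert_almost_equals(np.ones(5), np.ones(5) + 1e-3, tol=4e-2)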
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( _lowerCamelCase):
A_ : Any = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 2_55 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 2_24}
__lowerCAmelCase : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
__lowerCAmelCase : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCAmelCase : Any = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Union[str, Any] = resample
__lowerCAmelCase : int = do_center_crop
__lowerCAmelCase : Union[str, Any] = crop_size
__lowerCAmelCase : List[Any] = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : int = do_normalize
__lowerCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase : List[str] = int((2_56 / 2_24) * size['shortest_edge'] )
__lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
_SCREAMING_SNAKE_CASE , size=(size_dict['height'], size_dict['width']) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : List[str] = resample if resample is not None else self.resample
__lowerCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : str = image_std if image_std is not None else self.image_std
__lowerCAmelCase : List[str] = size if size is not None else self.size
__lowerCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase : str = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCAmelCase : Union[str, Any] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__lowerCAmelCase : Dict = [self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__lowerCAmelCase : Optional[int] = [self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCAmelCase : Dict = [self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__lowerCAmelCase : Optional[int] = [self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) | 86 |
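# Illustrative sketch: preprocess() above is a plain chain of array transforms.
# The rescale and normalize steps in isolation on an HWC uint8 image
# (hypothetical function; the mean/std values are example assumptions):
import numpy as np

def rescale_and_normalize(image, mean, std, scale=1 / 255):
    x = image.astype(np.float32) * scale          # do_rescale step
    return (x - np.array(mean)) / np.array(std)   # do_normalize step, per channel

img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
out = rescale_and_normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
assert out.shape == (224, 224, 3)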
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
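# Illustrative sketch: the slow test earlier in this file round-trips each
# model through the Keras SavedModel format and checks the reloaded outputs.
# The bare pattern on a toy model (hypothetical, not ConvBERT; assumes the
# Keras 2 save API used by transformers at this version):
import os
import tempfile
import tensorflow as tf

toy = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
x = tf.random.normal((2, 8))
with tempfile.TemporaryDirectory() as tmpdirname:
    path = os.path.join(tmpdirname, 'saved_model', '1')
    toy.save(path)                                # writes a SavedModel directory
    reloaded = tf.keras.models.load_model(path)
    tf.debugging.assert_near(toy(x), reloaded(x), atol=1e-6)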
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""CLIPFeatureExtractor"""]
lowerCamelCase__ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
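# Illustrative sketch: a minimal hand-rolled version of the lazy-import pattern
# used above (hypothetical class; transformers' _LazyModule adds more, such as
# __dir__ support and pickling). Attribute access triggers the real import of
# the defining submodule on first use:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)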
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
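# Illustrative sketch: get_expected_values() above applies the "shortest edge"
# resize rule (the longest_edge cap is deliberately out of play, per the
# comment in __init__). The rule in isolation, with hypothetical names:
def shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)  # portrait: width snaps to 18
assert shortest_edge_resize(200, 400) == (18, 36)  # landscape: height snaps to 18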
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
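    # The assertions above cover both construction paths: the serialized dict keeps the
    # default {"shortest_edge": 18, "longest_edge": 1333}, while the legacy kwargs
    # size=42 / max_size=84 are folded into {"shortest_edge": 42, "longest_edge": 84}.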
    def __lowerCamelCase ( self ):
        pass  # no-op: nothing to verify for this case
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
        # Test non-batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched input
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
        # Test non-batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched input
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test non-batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched input
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
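    # The three tests above exercise the same resize/shape contract for PIL, NumPy, and
    # PyTorch inputs; only the input container changes, the expected geometry does not.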
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
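        # The expected slice assumes the default preprocessing chain (resize -> rescale by
        # 1/255 -> normalize); the exact values also depend on DETR's default ImageNet
        # mean/std, which this comment assumes rather than the test asserting directly.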
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
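    # Hedged derivation of the 800x1066 shape asserted above, assuming the checkpoint's
    # default size={"shortest_edge": 800, "longest_edge": 1333}: the 480x640 image takes
    # the w > h branch, so height = 800 and width = int(800 * 640 / 480) = 1066.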
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
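    # Hedged note on the masks assertion above: summing the boolean masks counts the "on"
    # pixels across all six 800x1066 instance masks, i.e. 822873 out of a possible
    # 6 * 800 * 1066 = 5116800, pinning down the mask conversion without storing tensors.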
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCamelCase__ = """examples/"""
lowerCamelCase__ = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCamelCase__ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCamelCase__ = """README.md"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Optional[Any] = f.read()
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = REPLACE_PATTERNS[pattern]
__lowerCAmelCase : str = replace.replace('VERSION' , _UpperCamelCase )
__lowerCAmelCase : int = re_pattern.sub(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase ):
for folder, directories, fnames in os.walk(_UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern='examples' )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not patch:
update_version_in_examples(_UpperCamelCase )
def __lowerCAmelCase ():
__lowerCAmelCase : Union[str, Any] = '🤗 Transformers currently provides the following architectures'
__lowerCAmelCase : Union[str, Any] = '1. Want to contribute a new model?'
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Optional[int] = f.readlines()
# Find the start of the list.
__lowerCAmelCase : Union[str, Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCAmelCase : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
__lowerCAmelCase : Dict = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_UpperCamelCase )
def __lowerCAmelCase ():
with open(REPLACE_FILES['init'] , 'r' ) as f:
__lowerCAmelCase : Optional[int] = f.read()
__lowerCAmelCase : Any = REPLACE_PATTERNS['init'][0].search(_UpperCamelCase ).groups()[0]
return packaging.version.parse(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase=False ):
__lowerCAmelCase : Any = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
__lowerCAmelCase : List[str] = default_version.base_version
elif patch:
__lowerCAmelCase : Optional[int] = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__lowerCAmelCase : Dict = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__lowerCAmelCase : Dict = input(F"Which version are you releasing? [{default_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase : Optional[int] = default_version
print(F"Updating version to {version}." )
global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def __lowerCAmelCase ():
__lowerCAmelCase : Tuple = get_version()
__lowerCAmelCase : Optional[int] = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
__lowerCAmelCase : int = current_version.base_version
# Check with the user we got that right.
__lowerCAmelCase : Union[str, Any] = input(F"Which version are we developing now? [{dev_version}]" )
if len(_UpperCamelCase ) == 0:
__lowerCAmelCase : Optional[int] = dev_version
print(F"Updating version to {version}." )
global_version_update(_UpperCamelCase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
lowerCamelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 86 |
"""simple docstring"""
import numpy as np
def __lowerCAmelCase (_UpperCamelCase ):
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase (_UpperCamelCase ):
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowerCAmelCase (_UpperCamelCase="" ):
__lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
return os.path.join(_UpperCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.rand(12 , dtype=torch.floataa ) - 0.5
__lowerCAmelCase : Optional[Any] = AgentAudio(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) )
# Ensure that the file contains the same value as the original tensor
__lowerCAmelCase , __lowerCAmelCase : int = sf.read(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , torch.tensor(_SCREAMING_SNAKE_CASE ) , atol=1E-4 ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__lowerCAmelCase : int = get_new_path(suffix='.wav' )
sf.write(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1_60_00 )
__lowerCAmelCase : int = AgentAudio(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , _SCREAMING_SNAKE_CASE )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = torch.randint(0 , 2_56 , (64, 64, 3) )
__lowerCAmelCase : int = AgentImage(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
__lowerCAmelCase : Tuple = Image.open(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = AgentImage(_SCREAMING_SNAKE_CASE )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
__lowerCAmelCase : Any = Image.open(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AgentImage(_SCREAMING_SNAKE_CASE )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_SCREAMING_SNAKE_CASE ) )
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'Hey!'
__lowerCAmelCase : Optional[Any] = AgentText(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , agent_type.to_string() )
self.assertEqual(_SCREAMING_SNAKE_CASE , agent_type.to_raw() )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) : Optional[Any] = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
return 10 - x * x
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# Bolzano theory in order to find if there is a root between a and b
if equation(_UpperCamelCase ) * equation(_UpperCamelCase ) >= 0:
raise ValueError('Wrong space!' )
__lowerCAmelCase : List[Any] = a
while (b - a) >= 0.01:
# Find middle point
__lowerCAmelCase : int = (a + b) / 2
# Check if middle point is root
if equation(_UpperCamelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_UpperCamelCase ) * equation(_UpperCamelCase ) < 0:
__lowerCAmelCase : Dict = c
else:
__lowerCAmelCase : str = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 86 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( _lowerCamelCase):
A_ : int = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 2_55 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 8 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = do_rescale
__lowerCAmelCase : int = rescale_factor
__lowerCAmelCase : Tuple = do_pad
__lowerCAmelCase : List[Any] = pad_size
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase , __lowerCAmelCase : Dict = get_image_size(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = (old_height // size + 1) * size - old_height
__lowerCAmelCase : List[Any] = (old_width // size + 1) * size - old_width
return pad(_SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Any = do_pad if do_pad is not None else self.do_pad
__lowerCAmelCase : int = pad_size if pad_size is not None else self.pad_size
__lowerCAmelCase : List[str] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase : int = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCAmelCase : Union[str, Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
__lowerCAmelCase : Tuple = [self.pad(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Optional[int] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A__ ( unittest.TestCase):
A_ : List[str] = MODEL_FOR_MASKED_LM_MAPPING
A_ : Union[str, Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
__lowerCAmelCase : int = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_80_15, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_55_06, 'token_str': ' accuser'},
] , )
__lowerCAmelCase : Dict = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_80_15,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_55_06,
'token_str': ' accuser',
},
] , )
__lowerCAmelCase : Optional[Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
__lowerCAmelCase : List[Any] = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
__lowerCAmelCase : int = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] , )
__lowerCAmelCase : Union[str, Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 29_41, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
] , )
__lowerCAmelCase : str = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
__lowerCAmelCase : Optional[int] = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
@slow
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 6_10, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 15_73, 'token_str': ' Chris'},
] , )
__lowerCAmelCase : Optional[int] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 22_01,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_27_90,
'token_str': ' Lyon',
},
] , )
__lowerCAmelCase : Dict = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 29_41, 'token_str': ' Te'},
] , )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : Any = None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
@require_tf
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
__lowerCAmelCase : str = None
__lowerCAmelCase : Union[str, Any] = None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
__lowerCAmelCase : List[str] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = [
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = fill_masker.tokenizer
__lowerCAmelCase : str = fill_masker.model
__lowerCAmelCase : List[Any] = fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
__lowerCAmelCase : Optional[Any] = fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
__lowerCAmelCase : Optional[Any] = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker('This is' )
self.run_test_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_top_k_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_duplicate_targets_and_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_multiple_masks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = tokenizer.get_vocab()
__lowerCAmelCase : Any = sorted(vocab.keys() )[:2]
# Pipeline argument
__lowerCAmelCase : Optional[Any] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , targets=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
__lowerCAmelCase : Tuple = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Call argument
__lowerCAmelCase : List[Any] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
__lowerCAmelCase : str = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Score equivalence
__lowerCAmelCase : Optional[Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [top_mask['token_str'] for top_mask in outputs]
__lowerCAmelCase : Dict = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
        # Raises with invalid targets
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[''] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = fill_masker(f"This is a {tokenizer.mask_token}" , targets='' )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , top_k=2 )
__lowerCAmelCase : str = fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
__lowerCAmelCase : Union[str, Any] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
] , )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = tokenizer.get_vocab()
__lowerCAmelCase : Optional[int] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# top_k=2, ntargets=3
__lowerCAmelCase : Union[str, Any] = sorted(vocab.keys() )[:3]
__lowerCAmelCase : str = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=_SCREAMING_SNAKE_CASE )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
        __lowerCAmelCase : Dict = [el['token_str'] for el in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : _SCREAMING_SNAKE_CASE["score"] , reverse=_SCREAMING_SNAKE_CASE )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ).issubset(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=_SCREAMING_SNAKE_CASE )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
__lowerCAmelCase : Union[str, Any] = sorted(vocab.keys() )[:3]
__lowerCAmelCase : Any = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__lowerCAmelCase : Any = fill_masker(f"My name is {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more results than the number of unique targets
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 3 )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
{'sequence': ANY(_SCREAMING_SNAKE_CASE ), 'score': ANY(_SCREAMING_SNAKE_CASE ), 'token': ANY(_SCREAMING_SNAKE_CASE ), 'token_str': ANY(_SCREAMING_SNAKE_CASE )},
],
] , ) | 86 |
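# --- Illustrative sketch added in editing; not part of the original test file.
# A minimal, hedged example of the fill-mask pipeline behaviour the tests
# above assert on. The checkpoint name is an assumption; any fill-mask model
# with a mask token works, and each prediction is a dict carrying the same
# four keys checked above.
if __name__ == "__main__":
    from transformers import pipeline

    fill_masker = pipeline("fill-mask", model="distilroberta-base")
    preds = fill_masker("This is a <mask>.", top_k=2)
    assert {"sequence", "score", "token", "token_str"} <= set(preds[0])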
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
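# --- Illustrative sketch added in editing; not part of the original file.
# The public `transformers` API exposes this configuration as MarkupLMConfig;
# a hedged example of overriding the xpath-related defaults defined above:
if __name__ == "__main__":
    from transformers import MarkupLMConfig, MarkupLMModel

    config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
    model = MarkupLMModel(config)
    print(model.config.max_depth)  # 50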
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A__ ( _lowerCamelCase):
A_ : Optional[Any] = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 2_55 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = size if size is not None else {'shortest_edge': 2_24}
__lowerCAmelCase : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
__lowerCAmelCase : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCAmelCase : Tuple = do_resize
__lowerCAmelCase : Union[str, Any] = size
__lowerCAmelCase : str = resample
__lowerCAmelCase : Optional[int] = do_center_crop
__lowerCAmelCase : List[Any] = crop_size
__lowerCAmelCase : Any = do_rescale
__lowerCAmelCase : List[str] = rescale_factor
__lowerCAmelCase : Tuple = do_normalize
__lowerCAmelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowerCAmelCase : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
__lowerCAmelCase : str = do_convert_rgb
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : str = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowerCAmelCase : List[Any] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['shortest_edge'] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : Dict = size if size is not None else self.size
__lowerCAmelCase : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='size' , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = resample if resample is not None else self.resample
__lowerCAmelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : int = image_std if image_std is not None else self.image_std
__lowerCAmelCase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCAmelCase : Optional[Any] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCAmelCase : Union[str, Any] = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
__lowerCAmelCase : Dict = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__lowerCAmelCase : int = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__lowerCAmelCase : Any = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCAmelCase : str = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__lowerCAmelCase : List[Any] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Tuple = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) | 86 |
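# --- Illustrative sketch added in editing; not part of the original file.
# The defaults above (OPENAI_CLIP_MEAN/STD, RGB conversion, 224px crop) match
# CLIP-style preprocessing, so the public CLIPImageProcessor is used here as
# a stand-in; the exact class this file defines may differ, but the call
# pattern is the same.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPImageProcessor

    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    batch = CLIPImageProcessor()(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)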
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
    # load the pre-trained checkpoint
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
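# --- Editor's usage note (added): an example invocation of the converter
# above. The script file name and paths are placeholders, not taken from the
# original source:
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-hf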
"""simple docstring"""
from numpy import exp, pi, sqrt
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
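# --- Editor's worked check (added): at x = mu the density above reduces to
# 1 / sqrt(2 * pi) ~= 0.3989 for a standard normal (mu = 0, sigma = 1). The
# names below come from this file's `from numpy import exp, pi, sqrt`.
if __name__ == "__main__":
    assert abs(1 / sqrt(2 * pi) * exp(0.0) - 0.3989) < 1e-3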
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
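# --- Illustrative sketch added in editing; not part of the original file.
# How the metric regexes defined above pull a value out of a training log
# line (the sample line is made up):
if __name__ == "__main__":
    import re

    line = "eval_accuracy = 0.8421"
    match = re.search(r"eval_accuracy.*=\D*(.*?)$", line)
    assert match is not None and match.group(1) == "0.8421"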
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : _SCREAMING_SNAKE_CASE[0] )[0]
            __lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : _SCREAMING_SNAKE_CASE[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 |
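# --- Illustrative sketch added in editing; not part of the original file.
# The minimal COCO-style payload the slow tests above feed to the processor;
# every field value here is made up for illustration.
if __name__ == "__main__":
    target = {
        "image_id": 39769,
        "annotations": [
            {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 75, "area": 1200.0, "iscrowd": 0}
        ],
    }
    print(sorted(target["annotations"][0]))  # area, bbox, category_id, iscrowd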
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
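# --- Editor's worked note (added): the heuristic computed in the node class
# above is the Manhattan distance |dx| + |dy|; from the start (0, 0) to the
# default goal (6, 6) it evaluates to abs(0 - 6) + abs(0 - 6) == 12.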
"""simple docstring"""
from manim import *
class A__ ( _lowerCamelCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase : str = Rectangle(height=0.25 , width=0.25 )
__lowerCAmelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : List[Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : int = Text('CPU' , font_size=24 )
__lowerCAmelCase : Optional[int] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(4 )]
__lowerCAmelCase : Dict = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : int = Text('GPU' , font_size=24 )
__lowerCAmelCase : int = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Optional[Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = Text('Model' , font_size=24 )
__lowerCAmelCase : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = []
__lowerCAmelCase : Dict = []
__lowerCAmelCase : List[str] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[int] = Text('Loaded Checkpoint' , font_size=24 )
__lowerCAmelCase : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
ckpt_arr.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowerCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase : Any = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : List[Any] = Text('Disk' , font_size=24 )
__lowerCAmelCase : Optional[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
__lowerCAmelCase : List[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Union[str, Any] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) , )
self.wait() | 86 |
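# --- Editor's usage note (added): a scene like the one above is typically
# rendered from the shell with Manim Community, e.g. (the class name here is
# this file's masked identifier):
#   manim -pql this_file.py A__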
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
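# --- Editor's worked check (added): with R = 0.0821 L*atm/(mol*K), one mole
# of an ideal gas at 273 K in 22.4 L exerts about 1 atm, consistent with the
# relations above.
if __name__ == "__main__":
    assert round((1 * 0.0821 * 273) / 22.4) == 1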
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
__lowerCAmelCase : Optional[Any] = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__lowerCAmelCase : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
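# --- Editor's worked check (added): 25,000 borrowed at 8% per annum over 15
# years gives a monthly payment of roughly 238.9 under the formula above.
if __name__ == "__main__":
    r, n = 0.08 / 12, 15 * 12
    emi = 25_000 * r * (1 + r) ** n / ((1 + r) ** n - 1)
    assert abs(emi - 238.9) < 0.2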
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records
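# Illustrative usage sketch (not part of the original file; assumes the public
# `transformers.pipeline` factory and the "gpt2" checkpoint):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])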
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
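# Illustrative example (not in the original script): the helper keeps the best score
# over all reference answers, so a prediction only needs to match one of them, e.g.
#     metric_max_over_ground_truths(exact_match_score, "Paris", ["Paris", "Paris, France"])
# returns 1 because the first reference is an exact match.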
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    # Argument types/defaults below are reconstructed (str/int/None are assumptions).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
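# Example invocation (illustrative; the script name and file paths are placeholders):
#     python eval_rag.py --model_name_or_path facebook/rag-sequence-nq --model_type rag_sequence \
#         --evaluation_set path/to/test.source --gold_data_path path/to/gold.csv --gold_data_mode qa \
#         --predictions_path preds.txt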
if __name__ == "__main__":
    args = get_args()
    main(args)
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    # Name reconstructed; same lookup implemented on top of the standard-library bisect module.
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
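# Quick sanity checks (illustrative, not part of the original module):
#     >>> binary_search([0, 5, 7, 10, 15], 5)
#     1
#     >>> binary_search([0, 5, 7, 10, 15], 6) is None
#     True
#     >>> bisect_left([0, 5, 5, 7], 5), bisect_right([0, 5, 5, 7], 5)
#     (1, 3)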
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
# Class name reconstructed; this matches the AltCLIP processor pattern
# (a CLIP image processor paired with an XLM-Roberta tokenizer).
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
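# Illustrative usage sketch (the checkpoint name is an assumption, not taken from this file):
#     from transformers import AltCLIPProcessor
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")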
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
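# Note (illustrative): the `from_pt=True` / `from_tf=True` round-trips above are the
# standard way to verify that PyTorch and TensorFlow checkpoints of the same
# architecture stay weight-compatible in both directions.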
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):  # noqa: E741
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot be matched twice.
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
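# Worked example (illustrative): for "martha" vs "marhta" all 6 characters match (m=6)
# and the pair (t, h) is swapped, giving t=1 transposition, so
#     jaro = (6/6 + 6/6 + (6 - 1)/6) / 3 ≈ 0.944
# and with the common prefix "mar" (length 3):
#     jaro_winkler = 0.944 + 0.1 * 3 * (1 - 0.944) ≈ 0.961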
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
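# Worked example (illustrative): with scale_factor=8 the function rounds each side up to
# a multiple of 8 in latent space, e.g. h=768 -> 768 // 64 = 12 -> 12 * 8 = 96, while
# h=770 -> 770 // 64 = 12 with a nonzero remainder -> 13 * 8 = 104; the returned sizes
# are always multiples of scale_factor that cover the input.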
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
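# Note (illustrative): the classifier-free guidance update in the denoising loop above
# combines the two UNet predictions as  noise = uncond + guidance_scale * (text - uncond);
# guidance_scale=1.0 reduces to the purely conditional prediction, and larger values
# follow the prompt more closely at the cost of sample diversity.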
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every connected component, starting each one with color 0.
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge connects two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
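# Extra check (illustrative): a triangle contains an odd cycle, so it is not bipartite.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False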
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
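# Note (illustrative): _LazyModule defers the heavy `torch`-dependent imports above until
# an attribute such as `CLIPSegModel` is first accessed, so importing this package stays cheap.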
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
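# Illustrative usage sketch (not part of the original file):
#     config = PoolFormerConfig(depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512])
#     assert config.model_type == "poolformer"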
"""simple docstring"""
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    # Name reconstructed: returns True when every element occurs exactly once.
    return len(set(input_list)) == len(input_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
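# Illustrative examples (not in the original file; note that `set` also collapses
# equal-but-distinct values such as 1 and True):
#     >>> all_unique([1, 2, 3])
#     True
#     >>> all_unique([1, 2, 2])
#     False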
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Optional[int] = UnCLIPImageVariationPipeline
A_ : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
A_ : Dict = IMAGE_VARIATION_BATCH_PARAMS
A_ : Dict = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
A_ : Dict = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 1_00
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
__lowerCAmelCase : Tuple = UnCLIPTextProjModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[int] = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
__lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __lowerCamelCase ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__lowerCAmelCase : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.dummy_decoder
__lowerCAmelCase : int = self.dummy_text_proj
__lowerCAmelCase : int = self.dummy_text_encoder
__lowerCAmelCase : Any = self.dummy_tokenizer
__lowerCAmelCase : List[Any] = self.dummy_super_res_first
__lowerCAmelCase : List[str] = self.dummy_super_res_last
__lowerCAmelCase : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
__lowerCAmelCase : Optional[Any] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
__lowerCAmelCase : Dict = CLIPImageProcessor(crop_size=32 , size=32 )
__lowerCAmelCase : Any = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
if pil_image:
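# conversion sketch: rescale the [-1, 1] tensor to [0, 1], clamp, move to CPU
# and reorder NCHW -> NHWC, then wrap the array as a PIL image for the pipeline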
__lowerCAmelCase : List[str] = input_image * 0.5 + 0.5
__lowerCAmelCase : Optional[int] = input_image.clamp(0 , 1 )
__lowerCAmelCase : Optional[int] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__lowerCAmelCase : Any = DiffusionPipeline.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Optional[int] = self.get_dummy_components()
__lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = output.images
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
__lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Any = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Union[str, Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'cpu'
__lowerCAmelCase : Optional[int] = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__lowerCAmelCase : Dict = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = output.images
__lowerCAmelCase : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__lowerCAmelCase : List[Any] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
__lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__lowerCAmelCase : Union[str, Any] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = torch.device('cpu' )
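# minimal scheduler stub for prepare_latents below; the obfuscated attribute A_
# is assumed to stand in for the init_noise_sigma scaling factor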
class A__ :
A_ : Any = 1
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : Any = pipe.decoder.dtype
__lowerCAmelCase : Union[str, Any] = 1
__lowerCAmelCase : Dict = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__lowerCAmelCase : List[Any] = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__lowerCAmelCase : Union[str, Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__lowerCAmelCase : int = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = pipe(
**_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
__lowerCAmelCase : List[str] = pipeline_inputs.pop('image' )
__lowerCAmelCase : Optional[Any] = pipe.image_encoder(_SCREAMING_SNAKE_CASE ).image_embeds
__lowerCAmelCase : Union[str, Any] = pipe(
**_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE , image_embeddings=_SCREAMING_SNAKE_CASE , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_b ).max() < 1E-4
@skip_mps
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = torch_device == 'cpu'
# Check is relaxed because there is no torch 2.0 sliced attention added-KV processor
__lowerCAmelCase : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=_SCREAMING_SNAKE_CASE )
@skip_mps
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = torch_device == 'cpu'
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowerCAmelCase : str = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE )
@skip_mps
def __lowerCamelCase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowerCamelCase ( self ):
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
__lowerCAmelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
__lowerCAmelCase : int = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
__lowerCAmelCase : List[str] = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCAmelCase : List[str] = pipeline(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
__lowerCAmelCase : int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 15 ) | 86 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ = getLogger(__name__)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 8 , _UpperCamelCase = 1024 , _UpperCamelCase="val" , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase="summarization" , _UpperCamelCase=None , _UpperCamelCase=1 , _UpperCamelCase = None , _UpperCamelCase="" , **_UpperCamelCase , ):
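# per-rank generation sketch: each process pins its GPU, loads the model,
# generates over its distributed-sampler shard, and dumps {pred, id} records to
# rank_<local_rank>_output.json for rank 0 to gather afterwards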
__lowerCAmelCase : List[Any] = str(_UpperCamelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=_UpperCamelCase )
__lowerCAmelCase : List[str] = Path(_UpperCamelCase )
__lowerCAmelCase : Any = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(_UpperCamelCase )
__lowerCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).cuda()
if fpaa:
__lowerCAmelCase : Union[str, Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_UpperCamelCase , _UpperCamelCase ) # update config with task specific params
__lowerCAmelCase : str = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
__lowerCAmelCase : Optional[int] = num_return_sequences
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
__lowerCAmelCase : int = tokenizer.model_max_length
if prefix is None:
__lowerCAmelCase : Any = prefix or getattr(model.config , 'prefix' , '' ) or ''
__lowerCAmelCase : str = SeqaSeqDataset(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , max_target_length=1024 , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , prefix=_UpperCamelCase , **_UpperCamelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
__lowerCAmelCase : Tuple = ds.make_sortish_sampler(_UpperCamelCase , distributed=_UpperCamelCase , add_extra_examples=_UpperCamelCase , shuffle=_UpperCamelCase )
__lowerCAmelCase : Dict = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn )
__lowerCAmelCase : Tuple = []
for batch in tqdm(_UpperCamelCase ):
__lowerCAmelCase : List[Any] = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=_UpperCamelCase , num_beams=_UpperCamelCase , **_UpperCamelCase , )
__lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
__lowerCAmelCase : Any = batch['ids']
if num_return_sequences > 1:
__lowerCAmelCase : List[Any] = chunks(_UpperCamelCase , _UpperCamelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_UpperCamelCase ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(_UpperCamelCase , _UpperCamelCase )
return results, sampler.num_replicas
def __lowerCAmelCase ():
__lowerCAmelCase : str = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=_UpperCamelCase , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=_UpperCamelCase , help='like facebook/bart-large-cnn, t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=_UpperCamelCase , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=_UpperCamelCase , default=_UpperCamelCase )
parser.add_argument(
'--type_path' , type=_UpperCamelCase , default='test' , help='which subset to evaluate, typically train/val/test' )
parser.add_argument('--task' , type=_UpperCamelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=_UpperCamelCase , default=8 , required=_UpperCamelCase , help='batch size' )
parser.add_argument(
'--local_rank' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=_UpperCamelCase , default=600 , required=_UpperCamelCase , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase )
parser.add_argument('--tgt_lang' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase )
parser.add_argument(
'--prefix' , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
__lowerCAmelCase : Optional[int] = time.time()
__lowerCAmelCase , __lowerCAmelCase : str = parser.parse_known_args()
__lowerCAmelCase : Any = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
__lowerCAmelCase : List[Any] = Path(args.save_dir + '_tmp' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) # this handles locking.
__lowerCAmelCase : List[str] = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
__lowerCAmelCase : Optional[int] = {}
if args.src_lang is not None:
__lowerCAmelCase : Tuple = args.src_lang
if args.tgt_lang is not None:
__lowerCAmelCase : Tuple = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_UpperCamelCase )
__lowerCAmelCase , __lowerCAmelCase : str = eval_data_dir(
args.data_dir , _UpperCamelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_UpperCamelCase , **_UpperCamelCase , )
if args.local_rank <= 0:
__lowerCAmelCase : Optional[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_UpperCamelCase )
__lowerCAmelCase : List[str] = gather_results_from_each_node(_UpperCamelCase , _UpperCamelCase , args.sync_timeout )
__lowerCAmelCase : Optional[Any] = combine_partial_results(_UpperCamelCase )
if args.num_return_sequences > 1:
__lowerCAmelCase : int = save_dir.joinpath('pseudolabel_results.json' )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(_UpperCamelCase , _UpperCamelCase )
return
__lowerCAmelCase : Optional[int] = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(_UpperCamelCase ) as f:
__lowerCAmelCase : Union[str, Any] = [x.rstrip() for x in f.readlines()][: len(_UpperCamelCase )]
# Calculate metrics, save metrics, and save _generations.txt
__lowerCAmelCase : Optional[Any] = 'translation' in args.task
__lowerCAmelCase : Any = calculate_bleu if calc_bleu else calculate_rouge
__lowerCAmelCase : Any = 'bleu' if calc_bleu else 'rouge'
__lowerCAmelCase : Dict = score_fn(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : int = len(_UpperCamelCase )
__lowerCAmelCase : Tuple = time.time() - start_time
__lowerCAmelCase : Tuple = round(runtime / metrics['n_obs'] , 4 )
__lowerCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
__lowerCAmelCase : List[str] = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase )
print(_UpperCamelCase )
write_txt_file(_UpperCamelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(_UpperCamelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = []
for partial_result in partial_results:
records.extend(_UpperCamelCase )
__lowerCAmelCase : List[Any] = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x["id"] )
__lowerCAmelCase : Optional[int] = [x['pred'] for x in records]
return preds
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
# wait until every rank's .json shard has been written
__lowerCAmelCase : Union[str, Any] = time.time()
logger.info('waiting for all nodes to finish' )
__lowerCAmelCase : Tuple = None
while (time.time() - start_wait) < timeout:
__lowerCAmelCase : Dict = list(save_dir.glob('rank_*.json' ) )
if len(_UpperCamelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
__lowerCAmelCase : List[str] = lmap(_UpperCamelCase , _UpperCamelCase )
return json_data
except JSONDecodeError:
continue
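# while/else: the raise below only fires once the timeout expires without a
# successful return from inside the loop body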
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate() | 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Optional[int] = None
A_ : int = BloomTokenizerFast
A_ : List[Any] = BloomTokenizerFast
A_ : Dict = True
A_ : str = False
A_ : str = 'tokenizer_file'
A_ : Any = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : Optional[Any] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.get_rust_tokenizer()
__lowerCAmelCase : str = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
__lowerCAmelCase : List[Any] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__lowerCAmelCase : int = tokenizer.batch_encode_plus(_SCREAMING_SNAKE_CASE )['input_ids']
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCAmelCase : Union[str, Any] = 'This is a simple input'
__lowerCAmelCase : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__lowerCAmelCase : Union[str, Any] = ('This is a simple input', 'This is a pair')
__lowerCAmelCase : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
tokenizer_r.encode_plus(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
tokenizer_r.batch_encode_plus(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
tokenizer_r.encode(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
tokenizer_r.batch_encode_plus(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
__lowerCAmelCase : str = None # Hotfixing padding = None
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.get_rust_tokenizer()
__lowerCAmelCase : Optional[Any] = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = next(iter(_SCREAMING_SNAKE_CASE ) )['premise'] # pick one sample from the stream
__lowerCAmelCase : Optional[int] = list(sample_data.values() )
__lowerCAmelCase : Union[str, Any] = list(map(tokenizer.encode , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = [tokenizer.decode(_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE ) for x in output_tokens]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
# any sequence length constraints. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 86 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
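# worked example: answers[13] settles at 2, since no single square equals 13 and
# the recurrence finds 13 = 9 + 4 via 1 + answers[13 - 3**2] = 1 + answers[4] = 2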
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
lowerCamelCase__ = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
lowerCamelCase__ = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
lowerCamelCase__ = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def __lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__lowerCAmelCase : int = np.array([re.sub(_SCREAMING_SNAKE_CASE , '' , _SCREAMING_SNAKE_CASE ) for x in predictions] )
__lowerCAmelCase : List[str] = np.array([re.sub(_SCREAMING_SNAKE_CASE , '' , _SCREAMING_SNAKE_CASE ) for x in references] )
else:
__lowerCAmelCase : Union[str, Any] = np.asarray(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = np.asarray(_SCREAMING_SNAKE_CASE )
if ignore_case:
__lowerCAmelCase : str = np.char.lower(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.char.lower(_SCREAMING_SNAKE_CASE )
if ignore_punctuation:
__lowerCAmelCase : Dict = string.punctuation.maketrans('' , '' , string.punctuation )
__lowerCAmelCase : int = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
if ignore_numbers:
__lowerCAmelCase : int = string.digits.maketrans('' , '' , string.digits )
__lowerCAmelCase : List[str] = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.char.translate(_SCREAMING_SNAKE_CASE , table=_SCREAMING_SNAKE_CASE )
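# elementwise string equality; the mean of the resulting boolean vector,
# scaled by 100, is the exact-match percentage returned below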
__lowerCAmelCase : int = predictions == references
return {"exact_match": np.mean(_SCREAMING_SNAKE_CASE ) * 1_00} | 86 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
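# kv-cache equivalence sketch: prime the cache with all-but-the-last token,
# decode the final token against past_key_values, then compare the last-step
# logits against a full uncached forward pass on the same inputs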
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
# left-pad the batch: attention is masked out before start_index in both frameworks
pt_inputs['attention_mask'][batch_idx, :start_index] = 0
pt_inputs['attention_mask'][batch_idx, start_index:] = 1
prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
# left-pad the batch: attention is masked out before start_index in both frameworks
pt_inputs['attention_mask'][batch_idx, :start_index] = 0
pt_inputs['attention_mask'][batch_idx, start_index:] = 1
prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
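# rotary_dim controls how many channels of each attention head receive rotary
# position embeddings; GPT-J applies RoPE only to the first rotary_dim dims.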
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
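# Both cache checks follow the same recipe: prefill the cache with every token
# except the last, decode the final token incrementally against the cached keys
# and values, then compare the last-position logits with a single full forward
# pass; agreement within 1e-3 means the incremental decoding path is sound.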
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
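# Left padding is deliberate here: with a causal LM, right padding would sit
# between the prompt and the generated tokens and corrupt the continuations.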
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
# left-pad the batch: attention is masked out before start_index in both frameworks
pt_inputs['attention_mask'][batch_idx, :start_index] = 0
pt_inputs['attention_mask'][batch_idx, start_index:] = 1
prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
# left-pad the batch: attention is masked out before start_index in both frameworks
pt_inputs['attention_mask'][batch_idx, :start_index] = 0
pt_inputs['attention_mask'][batch_idx, start_index:] = 1
prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
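# The num_attention_heads / 2 in the expected shapes reflects ConvBERT's
# head_ratio of 2: half of the heads are handled by the span-based dynamic
# convolution branch, so only the remaining half appear in the attention maps.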
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
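# The mapping above feeds _LazyModule at the bottom of the file, so the heavy
# torch-backed classes are only imported when one of them is first accessed.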
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
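# These expected sizes mirror DETR's aspect-ratio-preserving resize: the shorter
# side is scaled to shortest_edge, and for batched inputs every image is padded
# up to the largest height and width in the batch.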
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCamelCase__ = logging.getLogger(__name__)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# remove any stale checkpoint files before saving the pruned model
if os.path.exists(_UpperCamelCase ):
if os.path.exists(os.path.join(_UpperCamelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , 'config.json' ) ):
os.remove(os.path.join(_UpperCamelCase , 'config.json' ) )
if os.path.exists(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase : Optional[Any] = 2
if unlogit:
__lowerCAmelCase : str = torch.pow(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Tuple = p * torch.log(_UpperCamelCase )
plogp[_UpperCamelCase == 0] = 0  # zero out 0 * log(0) terms to avoid NaN
return -plogp.sum(dim=-1 )
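# A minimal, self-contained sketch of what the helper above computes; the name
# and the example shapes below are illustrative only, not part of the original
# script.
def attention_entropy_sketch(p):
    # Shannon entropy along the last axis, with 0 * log(0) defined as 0
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
# Example: probs = torch.softmax(torch.randn(2, 4, 8), dim=-1) yields a (2, 4)
# entropy tensor, one value per (batch, head); larger values mean the attention
# is spread more diffusely over the 8 key positions.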
def __lowerCAmelCase (_UpperCamelCase ):
logger.info('lv, h >\t' + '\t'.join(F"{x + 1}" for x in range(len(_UpperCamelCase ) ) ) )
for row in range(len(_UpperCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:.5f}" for x in tensor[row].cpu().data ) )
else:
logger.info(F"layer {row + 1}:\t" + '\t'.join(F"{x:d}" for x in tensor[row].cpu().data ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False ):
__lowerCAmelCase , __lowerCAmelCase : int = model.config.num_hidden_layers, model.config.num_attention_heads
__lowerCAmelCase : List[Any] = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
__lowerCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device )
if head_mask is None:
__lowerCAmelCase : int = torch.ones(_UpperCamelCase , _UpperCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_UpperCamelCase )
# If the attention heads have actually been pruned, pass head_mask=None to avoid a shape mismatch
if actually_pruned:
__lowerCAmelCase : int = None
__lowerCAmelCase : Tuple = 0.0
__lowerCAmelCase : Tuple = 0.0
for step, inputs in enumerate(tqdm(_UpperCamelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__lowerCAmelCase : List[str] = tuple(t.to(args.device ) for t in inputs )
((__lowerCAmelCase) , ) : Tuple = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase , labels=_UpperCamelCase , head_mask=_UpperCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_UpperCamelCase ):
__lowerCAmelCase : int = entropy(attn.detach() , _UpperCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_UpperCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Tuple = torch.pow(torch.pow(_UpperCamelCase , _UpperCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
__lowerCAmelCase : Dict = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(_UpperCamelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(_UpperCamelCase )
logger.info('Head ranked by importance scores' )
__lowerCAmelCase : int = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
head_importance.numel() , device=args.device )
__lowerCAmelCase : Any = head_ranks.view_as(_UpperCamelCase )
print_ad_tensor(_UpperCamelCase )
return attn_entropy, head_importance, total_loss
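# Self-contained sketch of the gradient-based importance idea used above (toy
# model, hypothetical shapes; not part of the original script): a mask of ones
# multiplies each unit's output, and |d loss / d mask| after backprop serves as
# that unit's importance score.
def toy_head_importance_sketch():
    torch.manual_seed(0)
    x = torch.randn(8, 4)
    w = torch.randn(4, 4)
    mask = torch.ones(4, requires_grad=True)  # one entry per "head"
    loss = ((x @ w) * mask).pow(2).mean()
    loss.backward()
    return mask.grad.abs()  # analogous to head_importance before normalization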
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase )
__lowerCAmelCase : List[str] = 1 / loss # instead of downstream score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , _UpperCamelCase , original_score * args.masking_threshold )
__lowerCAmelCase : Optional[int] = torch.ones_like(_UpperCamelCase )
__lowerCAmelCase : Any = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__lowerCAmelCase : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
__lowerCAmelCase : Union[str, Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
head_importance[head_mask == 0.0] = float('Inf' )  # never re-select heads that are already masked
__lowerCAmelCase : Tuple = head_importance.view(-1 ).sort()[1]
if len(_UpperCamelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__lowerCAmelCase : List[str] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__lowerCAmelCase : Optional[int] = new_head_mask.view(-1 )
new_head_mask[current_heads_to_mask] = 0.0  # zero out the heads selected for masking
__lowerCAmelCase : Optional[Any] = new_head_mask.view_as(_UpperCamelCase )
__lowerCAmelCase : Any = new_head_mask.clone().detach()
print_ad_tensor(_UpperCamelCase )
# Compute metric and head importance again
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , head_mask=_UpperCamelCase )
__lowerCAmelCase : Tuple = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , _UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(_UpperCamelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
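# The loop above is a greedy search: repeatedly mask the least-important
# surviving heads, re-estimate importance under the new mask, and keep the
# last mask whose 1/loss score stayed above masking_threshold of the original.
# A compact sketch of the same idea on a plain array (hypothetical helper and
# score_fn, not part of the original script):
def greedy_mask_sketch(importance, score_fn, threshold=0.9, step=1):
    new_mask = np.ones(importance.size)
    best_mask = new_mask.copy()
    base = score_fn(new_mask)
    while score_fn(new_mask) >= threshold * base and new_mask.sum() > step:
        best_mask = new_mask.copy()  # last mask that passed the threshold
        alive = np.flatnonzero(new_mask)
        order = alive[np.argsort(importance.ravel()[alive])]
        new_mask[order[:step]] = 0.0  # drop the least important survivors
    return best_mask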
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = datetime.now()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = 1 / loss
__lowerCAmelCase : Dict = datetime.now() - before_time
__lowerCAmelCase : str = sum(p.numel() for p in model.parameters() )
__lowerCAmelCase : Tuple = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = [
v,
]
assert sum(len(_UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_UpperCamelCase )
__lowerCAmelCase : Optional[int] = sum(p.numel() for p in model.parameters() )
__lowerCAmelCase : str = datetime.now()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = compute_heads_importance(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase , actually_pruned=_UpperCamelCase , )
__lowerCAmelCase : Optional[Any] = 1 / loss
__lowerCAmelCase : Optional[Any] = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , _UpperCamelCase , _UpperCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , _UpperCamelCase , _UpperCamelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 100 )
save_model(_UpperCamelCase , args.output_dir )
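# Hedged illustration (added): the mask -> heads_to_prune conversion used by
# the pruning routine above, on a toy 2-layer / 3-head mask (values
# invented). A layer with a single masked head squeezes down to a bare int,
# which is why the routine wraps non-list values back into a list before
# calling model.prune_heads.
_toy_head_mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
_toy_to_prune = {
    layer: (1 - _toy_head_mask[layer].long()).nonzero().squeeze().tolist()
    for layer in range(len(_toy_head_mask))
}
assert _toy_to_prune == {0: 1, 1: 0}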
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=_UpperCamelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=_UpperCamelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=_UpperCamelCase , type=_UpperCamelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=_UpperCamelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask heads until a threshold of accuracy.' )
parser.add_argument(
        '--masking_threshold' , default=0.9 , type=_UpperCamelCase , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
        '--masking_amount' , default=0.1 , type=_UpperCamelCase , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=_UpperCamelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=_UpperCamelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=_UpperCamelCase , help='Batch size.' )
parser.add_argument('--seed' , type=_UpperCamelCase , default=42 )
parser.add_argument('--local_rank' , type=_UpperCamelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=_UpperCamelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_UpperCamelCase , default='' , help='Can be used for distant debugging.' )
__lowerCAmelCase : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__lowerCAmelCase : List[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__lowerCAmelCase : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__lowerCAmelCase : Dict = torch.device('cuda' , args.local_rank )
__lowerCAmelCase : Union[str, Any] = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__lowerCAmelCase : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__lowerCAmelCase : int = nn.parallel.DistributedDataParallel(
_UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCamelCase )
elif args.n_gpu > 1:
__lowerCAmelCase : Tuple = nn.DataParallel(_UpperCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_UpperCamelCase )
torch.save(_UpperCamelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , _UpperCamelCase )
# Prepare dataset
__lowerCAmelCase : List[str] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__lowerCAmelCase : List[Any] = (torch.from_numpy(_UpperCamelCase ),)
__lowerCAmelCase : Optional[int] = TensorDataset(*_UpperCamelCase )
__lowerCAmelCase : Tuple = RandomSampler(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__lowerCAmelCase : List[str] = mask_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
prune_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
main() | 86 |
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))
def swish(vector):
    return vector * sigmoid(vector)
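# Minimal usage sketch (added): both functions act elementwise on numpy
# arrays; sigmoid(0) = 0.5, hence swish(0) = 0.
_demo = np.array([-1.0, 0.0, 1.0])
assert np.allclose(sigmoid(_demo), [0.26894142, 0.5, 0.73105858])
assert np.allclose(swish(_demo), [-0.26894142, 0.0, 0.73105858])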
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
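# Hedged example (added): a triangle graph 0-1-2 where the MST keeps the two
# cheapest edges. adjacency_list maps each vertex to [[neighbor, weight], ...].
_example = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    _example[_u].append([_v, _w])
    _example[_v].append([_u, _w])
assert prisms_algorithm(_example) == [(0, 1), (1, 2)]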
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowerCamelCase__ = int(input("""Enter number of edges: """).strip())
lowerCamelCase__ = defaultdict(list)
for _ in range(edges_number):
lowerCamelCase__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 86 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
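        # Worked example (added): expected 1.0e8 against an output of
        # 1.00000005e8 gives a ratio of ~0.99999995, comfortably inside
        # [1 - TOLERANCE, 1 + TOLERANCE] although the absolute difference is ~5.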
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCamelCase , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class A__ ( _lowerCamelCase):
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.framework == "tf":
__lowerCAmelCase : List[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__lowerCAmelCase : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_SCREAMING_SNAKE_CASE )
else:
raise ValueError('Unsupported framework' )
return masked_index
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = self.get_masked_index(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
if return_tensors is None:
__lowerCAmelCase : Dict = self.framework
__lowerCAmelCase : List[Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(_SCREAMING_SNAKE_CASE )
return model_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = self.model(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model_inputs['input_ids']
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__lowerCAmelCase : Dict = target_ids.shape[0]
__lowerCAmelCase : Tuple = model_outputs['input_ids'][0]
__lowerCAmelCase : Union[str, Any] = model_outputs['logits']
if self.framework == "tf":
__lowerCAmelCase : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__lowerCAmelCase : Optional[int] = outputs.numpy()
__lowerCAmelCase : Dict = outputs[0, masked_index, :]
__lowerCAmelCase : str = stable_softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
__lowerCAmelCase : Optional[Any] = tf.gather_nd(tf.squeeze(_SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
__lowerCAmelCase : Dict = tf.expand_dims(_SCREAMING_SNAKE_CASE , 0 )
__lowerCAmelCase : Tuple = tf.math.top_k(_SCREAMING_SNAKE_CASE , k=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
__lowerCAmelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__lowerCAmelCase : List[str] = outputs[0, masked_index, :]
__lowerCAmelCase : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
__lowerCAmelCase : Dict = probs[..., target_ids]
__lowerCAmelCase , __lowerCAmelCase : Any = probs.topk(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = []
__lowerCAmelCase : int = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__lowerCAmelCase : Union[str, Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__lowerCAmelCase : Optional[Any] = input_ids.numpy().copy()
if target_ids is not None:
__lowerCAmelCase : int = target_ids[p].tolist()
__lowerCAmelCase : List[Any] = p
# Filter padding out:
__lowerCAmelCase : Optional[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__lowerCAmelCase : int = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_SCREAMING_SNAKE_CASE )
result.append(_SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
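    # Hedged usage sketch (added; the model name is only an example):
    #   from transformers import pipeline
    #   unmasker = pipeline('fill-mask', model='distilroberta-base')
    #   unmasker('Paris is the <mask> of France.', top_k=2, targets=['capital'])
    # Each dict assembled by postprocess above carries 'score', 'token',
    # 'token_str' and 'sequence'.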
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = [targets]
try:
__lowerCAmelCase : Dict = self.tokenizer.get_vocab()
except Exception:
__lowerCAmelCase : str = {}
__lowerCAmelCase : Any = []
for target in targets:
__lowerCAmelCase : Dict = vocab.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if id_ is None:
__lowerCAmelCase : List[Any] = self.tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , max_length=1 , truncation=_SCREAMING_SNAKE_CASE , )['input_ids']
if len(_SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
'We cannot replace it with anything meaningful, ignoring it' )
continue
__lowerCAmelCase : Tuple = input_ids[0]
            # XXX: if users hit this path it becomes pretty slow, so the
            # warning below nudges them to fix the input and get faster
            # performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
__lowerCAmelCase : List[str] = list(set(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('At least one target must be provided when passed.' )
__lowerCAmelCase : List[str] = np.array(_SCREAMING_SNAKE_CASE )
return target_ids
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowerCAmelCase : List[Any] = {}
if targets is not None:
__lowerCAmelCase : Optional[int] = self.get_target_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = target_ids
if top_k is not None:
__lowerCAmelCase : Tuple = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs | 86 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert Donut-style markup in `tokens` into a (possibly nested) JSON dict."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf'</s_{key}>', tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}', tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r'<s_' in content and r'</s_' in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == '<' and leaf[-2:] == '/>':
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r'<sep/>':  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {'text_sequence': tokens}
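    # Hedged example (added; tag names invented): token2json turns Donut-style
    # markup into nested JSON, e.g.
    #   '<s_menu><s_name>burger</s_name><s_price>$5</s_price></s_menu>'
    # parses to {'menu': {'name': 'burger', 'price': '$5'}}.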
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 |
"""simple docstring"""
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n=100):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
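# Hedged sanity check (added): the digit sum of the numerator of the 100th
# convergent of e is the published Project Euler 65 answer, 272.
assert solution() == 272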
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
"""simple docstring"""
def solution(n=1000):
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
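# Hedged sanity check (added): 144 = F(12) is the first Fibonacci number with
# three digits, so solution(3) == 12; the published Project Euler 25 answer
# for n = 1000 is 4782.
assert solution(3) == 12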
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 86 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A__ ( _lowerCamelCase , _lowerCamelCase):
A_ : Optional[int] = 1
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE=20_00 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=1E-3 ):
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Tuple = None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : Dict = torch.linspace(1 , self.config.sampling_eps , _SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowerCAmelCase : str = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowerCAmelCase : Tuple = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowerCAmelCase : Optional[int] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowerCAmelCase : Tuple = std.unsqueeze(-1 )
__lowerCAmelCase : int = -score / std
# compute
__lowerCAmelCase : List[str] = -1.0 / len(self.timesteps )
__lowerCAmelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowerCAmelCase : Optional[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowerCAmelCase : Any = beta_t.unsqueeze(-1 )
__lowerCAmelCase : int = -0.5 * beta_t * x
__lowerCAmelCase : Dict = torch.sqrt(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = drift - diffusion**2 * score
__lowerCAmelCase : Tuple = x + drift * dt
# add noise
__lowerCAmelCase : int = randn_tensor(x.shape , layout=x.layout , generator=_SCREAMING_SNAKE_CASE , device=x.device , dtype=x.dtype )
__lowerCAmelCase : List[str] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
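    # Math note (added): step_pred integrates the reverse-time VP-SDE
    #   dx = [-beta(t)/2 * x - beta(t) * score(x, t)] dt + sqrt(beta(t)) dw
    # with one Euler-Maruyama step (dt < 0 here), after rescaling the model
    # output by the marginal std sqrt(1 - exp(2 * log_mean_coeff)).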
def __len__( self ):
return self.config.num_train_timesteps | 86 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 |
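# A minimal usage sketch, assuming the config class above corresponds to
# MarkupLMConfig in the upstream transformers library (the dump obfuscates the
# class name, so treat the import as an assumption):
from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.max_depth)               # 50: maximum XPath nesting depth tracked
print(config.xpath_unit_hidden_size)  # 32: hidden size of each xpath unit embedding
print(config.tag_pad_id)              # 216: padding id for xpath tag sequences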
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
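# Worked example (a standalone sketch: the obfuscated definitions above all
# share one identifier, so descriptive equivalents of two of them are restated
# under hypothetical names; R is approximated as 0.0821 L·atm/(mol·K)):
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    # Ideal gas law PV = nRT rearranged to P = nRT / V
    return round(float((moles * 0.0821 * kelvin) / volume))

def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> float:
    # T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles)))

print(pressure_of_gas_system(2, 300, 10))   # -> 5 (about 4.93 atm before rounding)
print(temperature_of_gas_system(2, 10, 5))  # -> 305 (about 304.5 K before rounding)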
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
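# Quick sanity check of the Manhattan-distance heuristic computed by the Node
# class above, restated standalone (the 7x7 grid's goal cell is (6, 6)):
def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert manhattan((0, 0), (6, 6)) == 12  # start-to-goal estimate
assert manhattan((6, 6), (6, 6)) == 0   # the heuristic vanishes at the goal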
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""OwlViTFeatureExtractor"""]
lowerCamelCase__ = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
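# A hedged end-to-end sketch of the public API this module lazily exposes.
# The checkpoint name and the post-processing call follow recent transformers
# releases; verify both against your installed version before relying on them.
import requests
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width) per image
results = processor.post_process_object_detection(
    outputs, threshold=0.1, target_sizes=target_sizes
)
print(results[0]["boxes"].shape, results[0]["scores"].shape)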
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
__lowerCAmelCase : Optional[Any] = 'The dog is cute and lives in the garden house'
__lowerCAmelCase : Optional[int] = jnp.array([tokenizer.encode(_SCREAMING_SNAKE_CASE )] )
__lowerCAmelCase : str = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
__lowerCAmelCase : Optional[Any] = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )['last_hidden_state']
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) | 86 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                    'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                    ' model\'s max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 1 |
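# Minimal usage sketch of the pipeline class above through the high-level
# factory function (gpt2 is simply a convenient public checkpoint):
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "Once upon a time",
    max_new_tokens=20,      # generation budget beyond the prompt
    return_full_text=True,  # ReturnType.FULL_TEXT: prompt + continuation
)
print(outputs[0]["generated_text"])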
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = """▁"""
lowerCamelCase__ = {"""vocab_file""": """prophetnet.tokenizer"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
lowerCamelCase__ = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
lowerCamelCase__ = {
"""microsoft/xprophetnet-large-wiki100-cased""": 512,
}
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = collections.OrderedDict()
with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as reader:
__lowerCAmelCase : Any = reader.readlines()
for index, token in enumerate(_UpperCamelCase ):
__lowerCAmelCase : int = token.rstrip('\n' )
__lowerCAmelCase : Optional[int] = index
return vocab
class A__ ( _lowerCamelCase):
A_ : Dict = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__lowerCAmelCase : List[Any] = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
__lowerCAmelCase : List[Any] = f"[unused{i}]"
__lowerCAmelCase : List[str] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__lowerCAmelCase : Tuple = 12
__lowerCAmelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_SCREAMING_SNAKE_CASE )
def __getstate__( self ):
__lowerCAmelCase : List[Any] = self.__dict__.copy()
__lowerCAmelCase : Tuple = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCAmelCase : Tuple = {}
__lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCamelCase ( self ):
return len(self.sp_model ) + self.fairseq_offset
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCAmelCase : Dict = self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = ''.join(_SCREAMING_SNAKE_CASE ).replace(_SCREAMING_SNAKE_CASE , ' ' ).strip()
return out_string
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
__lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__lowerCAmelCase : int = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep | 86 |
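# A short usage sketch, assuming the class above is exported as
# XLMProphetNetTokenizer as in upstream transformers (needs sentencepiece):
from transformers import XLMProphetNetTokenizer

tokenizer = XLMProphetNetTokenizer.from_pretrained(
    "microsoft/xprophetnet-large-wiki100-cased"
)
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"])  # a single trailing [SEP]: tokens_a + [SEP]
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"][0].tolist()))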
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
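# The first two helpers above mirror the stdlib's bisect_left and bisect_right;
# this standalone check shows how the two differ when duplicates are present:
import bisect

data = [1, 2, 4, 4, 4, 7]
assert bisect.bisect_left(data, 4) == 2   # first slot whose value is >= 4
assert bisect.bisect_right(data, 4) == 5  # first slot whose value is > 4
assert bisect.bisect_left(data, 3) == 2   # insertion point for a missing value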
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
__lowerCAmelCase , __lowerCAmelCase : str = coefficient_matrix.shape
__lowerCAmelCase , __lowerCAmelCase : Dict = constant_matrix.shape
if rowsa != colsa:
__lowerCAmelCase : Tuple = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(_UpperCamelCase )
if colsa != 1:
__lowerCAmelCase : int = F"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(_UpperCamelCase )
if rowsa != rowsa:
__lowerCAmelCase : Union[str, Any] = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
F"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(_UpperCamelCase )
if len(_UpperCamelCase ) != rowsa:
__lowerCAmelCase : str = (
'Number of initial values must be equal to number of rows in coefficient '
F"matrix but received {len(_UpperCamelCase )} and {rowsa}"
)
raise ValueError(_UpperCamelCase )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
__lowerCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__lowerCAmelCase , __lowerCAmelCase : str = table.shape
strictly_diagonally_dominant(_UpperCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(_UpperCamelCase ):
__lowerCAmelCase : Tuple = []
for row in range(_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
for col in range(_UpperCamelCase ):
if col == row:
__lowerCAmelCase : Optional[Any] = table[row][col]
elif col == cols - 1:
__lowerCAmelCase : Tuple = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__lowerCAmelCase : Tuple = (temp + val) / denom
new_val.append(_UpperCamelCase )
__lowerCAmelCase : Tuple = new_val
return [float(_UpperCamelCase ) for i in new_val]
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase , __lowerCAmelCase : int = table.shape
__lowerCAmelCase : Tuple = True
for i in range(0 , _UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
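# Worked numeric example (a standalone sketch with hypothetical names, since the
# obfuscated definitions above share one identifier): one Jacobi update computes
# x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii, and iterating converges for the
# strictly diagonally dominant system below.
import numpy as np

def jacobi_step(a: np.ndarray, b: np.ndarray, x: np.ndarray) -> np.ndarray:
    d = np.diag(a)            # diagonal entries a_ii
    r = a - np.diagflat(d)    # off-diagonal remainder of the matrix
    return (b - r @ x) / d

a = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
x = np.zeros(3)
for _ in range(25):
    x = jacobi_step(a, b, x)
print(np.round(x, 4))         # close to the exact solution of a @ x = b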
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[Any] = AlbertTokenizer
A_ : int = AlbertTokenizerFast
A_ : List[str] = True
A_ : Optional[int] = True
A_ : Any = True
def __lowerCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase : Optional[Any] = AlbertTokenizer(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = 'this is a test'
__lowerCAmelCase : int = 'this is a test'
return input_text, output_text
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = '<pad>'
__lowerCAmelCase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 3_00_00 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : Tuple = self.get_rust_tokenizer()
__lowerCAmelCase : str = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.get_rust_tokenizer()
__lowerCAmelCase : str = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = AlbertTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [48, 25, 21, 12_89] )
__lowerCAmelCase : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
__lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
__lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = AlbertTokenizer(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer.encode('sequence builders' )
__lowerCAmelCase : Union[str, Any] = tokenizer.encode('multi-sequence build' )
__lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : Union[str, Any] = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , ) | 86 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
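        # Scale the starting latents by the scheduler's expected initial noise level
        # (init_noise_sigma is 1.0 for DDPM/DDIM-style schedulers, so this is often a no-op).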
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
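                # Classifier-free guidance: move from the unconditional prediction toward
                # the text-conditioned one, eps = eps_uncond + scale * (eps_text - eps_uncond).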
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
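            # The MoVQ decoder produces images in [-1, 1]; remap to [0, 1] before clamping
            # and converting to numpy / PIL.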
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
lowerCamelCase__ = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
lowerCamelCase__ = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
lowerCamelCase__ = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def __lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
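    # A rough sketch of the per-sentence score NLTK computes (Banerjee & Lavie, 2005):
    #   F_mean  = P * R / (alpha * P + (1 - alpha) * R)
    #   penalty = gamma * (chunks / matches) ** beta
    #   meteor  = F_mean * (1 - penalty)
    # which is why alpha, beta and gamma are surfaced as arguments below.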
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.5 ):
if NLTK_VERSION >= version.Version('3.6.5' ):
__lowerCAmelCase : Tuple = [
meteor_score.single_meteor_score(
word_tokenize(_SCREAMING_SNAKE_CASE ) , word_tokenize(_SCREAMING_SNAKE_CASE ) , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE )
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
else:
__lowerCAmelCase : Dict = [
meteor_score.single_meteor_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE )
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
return {"meteor": np.mean(_SCREAMING_SNAKE_CASE )} | 86 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
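        # In the BARThez vocabulary, 0 is <s> (BOS) and 2 is </s> (EOS), so the expected
        # ids for the first sentence below are framed by them.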
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase__ = True
except ImportError:
lowerCamelCase__ = False
try:
from torch.hub import _get_torch_home
lowerCamelCase__ = _get_torch_home()
except ImportError:
lowerCamelCase__ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
lowerCamelCase__ = os.path.join(torch_cache_home, """transformers""")
lowerCamelCase__ = """https://cdn.huggingface.co"""
lowerCamelCase__ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
lowerCamelCase__ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
lowerCamelCase__ = os.path.join(PATH, """config.yaml""")
lowerCamelCase__ = os.path.join(PATH, """attributes.txt""")
lowerCamelCase__ = os.path.join(PATH, """objects.txt""")
lowerCamelCase__ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
lowerCamelCase__ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase__ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase__ = """pytorch_model.bin"""
lowerCamelCase__ = """config.yaml"""
def __lowerCAmelCase (_UpperCamelCase=OBJECTS , _UpperCamelCase=ATTRIBUTES ):
__lowerCAmelCase : Any = []
with open(_UpperCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
__lowerCAmelCase : Union[str, Any] = []
with open(_UpperCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = OrderedDict()
with open(_UpperCamelCase , 'rb' ) as f:
__lowerCAmelCase : str = pkl.load(_UpperCamelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
__lowerCAmelCase : int = ckp.pop(_UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
__lowerCAmelCase : List[Any] = torch.tensor(_UpperCamelCase )
else:
            assert isinstance(_UpperCamelCase , torch.Tensor ), type(_UpperCamelCase )
__lowerCAmelCase : Dict = v
return r
class A__ :
A_ : int = {}
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "root" , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = name
__lowerCAmelCase : Any = level
__lowerCAmelCase : Optional[int] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__lowerCAmelCase : int = copy.deepcopy(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = copy.deepcopy(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = Config(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE , level=level + 1 )
__lowerCAmelCase : List[Any] = v
setattr(self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = val
__lowerCAmelCase : Tuple = val
__lowerCAmelCase : List[str] = key.split('.' )
__lowerCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase : Tuple = self._pointer
if len(_SCREAMING_SNAKE_CASE ) > 1:
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if hasattr(self , _SCREAMING_SNAKE_CASE ) and isinstance(getattr(self , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
setattr(getattr(self , _SCREAMING_SNAKE_CASE ) , '.'.join(levels[i:] ) , _SCREAMING_SNAKE_CASE )
if l == last_level:
__lowerCAmelCase : Tuple = val
else:
__lowerCAmelCase : List[str] = pointer[l]
def __lowerCamelCase ( self ):
return self._pointer
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with open(f"{file_name}" , 'w' ) as stream:
dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with open(f"{file_name}" , 'w' ) as stream:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as stream:
__lowerCAmelCase : Dict = load(_SCREAMING_SNAKE_CASE , Loader=_SCREAMING_SNAKE_CASE )
return data
def __str__( self ):
__lowerCAmelCase : Union[str, Any] = ' '
if self._name != "root":
__lowerCAmelCase : int = f"{t * (self._level-1)}{self._name}:\n"
else:
__lowerCAmelCase : Tuple = ''
__lowerCAmelCase : str = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(_SCREAMING_SNAKE_CASE ).__name__})\n"
__lowerCAmelCase : List[str] = level
return r[:-1]
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return cls(_SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = kwargs.pop('cache_dir' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = kwargs.pop('force_download' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = kwargs.pop('resume_download' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = kwargs.pop('proxies' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = kwargs.pop('local_files_only' , _SCREAMING_SNAKE_CASE )
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif os.path.isfile(_SCREAMING_SNAKE_CASE ) or is_remote_url(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = pretrained_model_name_or_path
else:
__lowerCAmelCase : List[str] = hf_bucket_url(_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , use_cdn=_SCREAMING_SNAKE_CASE )
try:
# Load from URL or cache if already cached
__lowerCAmelCase : List[str] = cached_path(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__lowerCAmelCase : int = Config.load_yaml(_SCREAMING_SNAKE_CASE )
except EnvironmentError:
            __lowerCAmelCase : Tuple = f"Can't load config for '{_SCREAMING_SNAKE_CASE}'"
raise EnvironmentError(_SCREAMING_SNAKE_CASE )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_SCREAMING_SNAKE_CASE ), kwargs
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = torch.load('dump.pt' , map_location=in_tensor.device )
__lowerCAmelCase : Optional[int] = in_tensor.numpy()
__lowerCAmelCase : Dict = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_UpperCamelCase , _UpperCamelCase , rtol=0.01 , atol=0.1 ), (
F"{sum([1 for x in np.isclose(_UpperCamelCase , _UpperCamelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = urlparse(_UpperCamelCase )
return parsed.scheme in ("http", "https")
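# Two bucket layouts coexist: the legacy flat S3 layout ("<model_id>-<filename>") for ids
# without a namespace, and "<model_id>/<filename>" for namespaced ids containing a "/".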
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True ):
__lowerCAmelCase : str = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__lowerCAmelCase : Any = '/' not in model_id
if legacy_format:
return F"{endpoint}/{model_id}-{filename}"
else:
return F"{endpoint}/{model_id}/{filename}"
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=0 , _UpperCamelCase=None , ):
__lowerCAmelCase : str = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
ua += "; " + "; ".join('{}/{}'.format(_UpperCamelCase , _UpperCamelCase ) for k, v in user_agent.items() )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
ua += "; " + user_agent
__lowerCAmelCase : Dict = {'user-agent': ua}
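    # Resume support: request only the remaining byte range; an HTTP 416 below means the
    # requested range is empty, i.e. the file was already fully downloaded.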
if resume_size > 0:
__lowerCAmelCase : Any = 'bytes=%d-' % (resume_size,)
__lowerCAmelCase : int = requests.get(_UpperCamelCase , stream=_UpperCamelCase , proxies=_UpperCamelCase , headers=_UpperCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__lowerCAmelCase : Optional[Any] = response.headers.get('Content-Length' )
__lowerCAmelCase : List[str] = resume_size + int(_UpperCamelCase ) if content_length is not None else None
__lowerCAmelCase : Tuple = tqdm(
unit='B' , unit_scale=_UpperCamelCase , total=_UpperCamelCase , initial=_UpperCamelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_UpperCamelCase ) )
temp_file.write(_UpperCamelCase )
progress.close()
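# The cache routine below serializes concurrent downloads of the same URL with a sibling
# ".lock" file and, when resume_download is set, appends to a ".incomplete" file so an
# interrupted transfer can be picked up where it left off.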
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=10 , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=False , ):
if cache_dir is None:
__lowerCAmelCase : Dict = TRANSFORMERS_CACHE
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = str(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
__lowerCAmelCase : str = None
if not local_files_only:
try:
__lowerCAmelCase : int = requests.head(_UpperCamelCase , allow_redirects=_UpperCamelCase , proxies=_UpperCamelCase , timeout=_UpperCamelCase )
if response.status_code == 200:
__lowerCAmelCase : int = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__lowerCAmelCase : Any = url_to_filename(_UpperCamelCase , _UpperCamelCase )
# get cache path to put the file
__lowerCAmelCase : List[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_UpperCamelCase ):
return cache_path
else:
__lowerCAmelCase : int = [
file
for file in fnmatch.filter(os.listdir(_UpperCamelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_UpperCamelCase ) > 0:
return os.path.join(_UpperCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_UpperCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__lowerCAmelCase : Tuple = cache_path + '.lock'
with FileLock(_UpperCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_UpperCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__lowerCAmelCase : Tuple = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_UpperCamelCase , 'a+b' ) as f:
yield f
__lowerCAmelCase : str = _resumable_file_manager
if os.path.exists(_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = os.stat(_UpperCamelCase ).st_size
else:
__lowerCAmelCase : Optional[int] = 0
else:
__lowerCAmelCase : Optional[int] = partial(tempfile.NamedTemporaryFile , dir=_UpperCamelCase , delete=_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
                '%s not found in cache or force_download set to True, downloading to %s' % (_UpperCamelCase , temp_file.name) )
http_get(
_UpperCamelCase , _UpperCamelCase , proxies=_UpperCamelCase , resume_size=_UpperCamelCase , user_agent=_UpperCamelCase , )
os.replace(temp_file.name , _UpperCamelCase )
__lowerCAmelCase : Tuple = {'url': url, 'etag': etag}
__lowerCAmelCase : List[str] = cache_path + '.json'
with open(_UpperCamelCase , 'w' ) as meta_file:
json.dump(_UpperCamelCase , _UpperCamelCase )
return cache_path
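# Cache entries are keyed by sha256(url), with sha256(etag) appended when available, so a
# new upstream version (fresh etag) naturally lands in a fresh cache file.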
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None ):
__lowerCAmelCase : int = url.encode('utf-8' )
__lowerCAmelCase : List[Any] = shaaaa(_UpperCamelCase )
__lowerCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__lowerCAmelCase : List[Any] = etag.encode('utf-8' )
__lowerCAmelCase : Any = shaaaa(_UpperCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
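# cached_path dispatches on its input: remote URLs go through get_from_cache, existing
# local paths are returned as-is, and zip/tar archives can optionally be unpacked into a
# lock-guarded sibling "-extracted" directory.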
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False , ):
if cache_dir is None:
__lowerCAmelCase : str = TRANSFORMERS_CACHE
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[int] = str(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = str(_UpperCamelCase )
if is_remote_url(_UpperCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__lowerCAmelCase : List[str] = get_from_cache(
_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , proxies=_UpperCamelCase , resume_download=_UpperCamelCase , user_agent=_UpperCamelCase , local_files_only=_UpperCamelCase , )
elif os.path.exists(_UpperCamelCase ):
# File, and it exists.
__lowerCAmelCase : Union[str, Any] = url_or_filename
elif urlparse(_UpperCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_UpperCamelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_UpperCamelCase ) )
if extract_compressed_file:
if not is_zipfile(_UpperCamelCase ) and not tarfile.is_tarfile(_UpperCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = os.path.split(_UpperCamelCase )
__lowerCAmelCase : List[str] = output_file.replace('.' , '-' ) + '-extracted'
__lowerCAmelCase : Union[str, Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__lowerCAmelCase : Dict = output_path + '.lock'
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
os.makedirs(_UpperCamelCase )
if is_zipfile(_UpperCamelCase ):
with ZipFile(_UpperCamelCase , 'r' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
elif tarfile.is_tarfile(_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_UpperCamelCase ) )
return output_path_extracted
return output_path
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="," ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase ) as f:
__lowerCAmelCase : Optional[int] = eval(f.read() )
else:
__lowerCAmelCase : str = requests.get(_UpperCamelCase )
try:
            __lowerCAmelCase : Optional[int] = req.json() # call .json() on the Response object, not on the requests module
except Exception:
__lowerCAmelCase : Dict = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCAmelCase : str = eval(_UpperCamelCase )
except Exception:
__lowerCAmelCase : List[Any] = data.split('\n' )
req.close()
return data
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : int = requests.get(_UpperCamelCase )
__lowerCAmelCase : Dict = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as stream:
__lowerCAmelCase : Optional[int] = pkl.load(_UpperCamelCase )
__lowerCAmelCase : Tuple = weights.pop('model' )
__lowerCAmelCase : str = {}
for k, v in model.items():
__lowerCAmelCase : str = torch.from_numpy(_UpperCamelCase )
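        # PyTorch BatchNorm layers expect a `num_batches_tracked` buffer that checkpoints
        # like this one do not ship; register a zero placeholder next to each `running_var`.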
if "running_var" in k:
__lowerCAmelCase : Optional[Any] = torch.tensor([0] )
__lowerCAmelCase : List[str] = k.replace('running_var' , 'num_batches_tracked' )
__lowerCAmelCase : Tuple = zero
return new
def __lowerCAmelCase ():
print(F"{os.path.abspath(os.path.join(_UpperCamelCase , os.pardir ) )}/demo.ipynb" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="RGB" ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
__lowerCAmelCase : List[str] = cva.imread(_UpperCamelCase )
else:
__lowerCAmelCase : str = get_image_from_url(_UpperCamelCase )
assert img is not None, F"could not connect to: {im}"
__lowerCAmelCase : Dict = cva.cvtColor(_UpperCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__lowerCAmelCase : Tuple = img[:, :, ::-1]
return img
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=1 ):
return (images[i : i + batch] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )) | 86 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
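        # Absolute tolerance used when validating ONNX-exported outputs against PyTorch.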
return 2E-3 | 86 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=5 ):
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('<mask>' ) == 1
__lowerCAmelCase : str = torch.tensor(tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) ).unsqueeze(0 ) # Batch size 1
__lowerCAmelCase : List[Any] = model(_UpperCamelCase )[0] # The last hidden-state is the first element of the output tuple
__lowerCAmelCase : List[str] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__lowerCAmelCase : Optional[int] = logits[0, masked_index, :]
__lowerCAmelCase : Tuple = logits.softmax(dim=0 )
__lowerCAmelCase , __lowerCAmelCase : int = prob.topk(k=_UpperCamelCase , dim=0 )
__lowerCAmelCase : str = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_UpperCamelCase ) )] )
__lowerCAmelCase : Optional[int] = tokenizer.mask_token
__lowerCAmelCase : List[str] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
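        # "\u2581" is SentencePiece's word-boundary marker; map it back to a plain space.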
__lowerCAmelCase : Optional[Any] = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(_UpperCamelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(_UpperCamelCase ) , _UpperCamelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_UpperCamelCase , _UpperCamelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowerCamelCase__ = CamembertTokenizer.from_pretrained("""camembert-base""")
lowerCamelCase__ = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
lowerCamelCase__ = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 86 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
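        # get_label_ids maps human-readable ImageNet class names to the integer labels the
        # class-conditioned DiT checkpoint was trained on.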
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : Union[str, Any] = image_size
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Tuple = num_channels
__lowerCAmelCase : List[str] = is_training
__lowerCAmelCase : Union[str, Any] = use_labels
__lowerCAmelCase : Optional[Any] = hidden_size
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : str = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Dict = hidden_dropout_prob
__lowerCAmelCase : int = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = type_sequence_label_size
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : Union[str, Any] = mask_ratio
__lowerCAmelCase : List[str] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowerCAmelCase : Any = (image_size // patch_size) ** 2
__lowerCAmelCase : Optional[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
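        # e.g. with the defaults here (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91.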
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = TFViTMAEModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
__lowerCAmelCase : List[Any] = (self.image_size // self.patch_size) ** 2
__lowerCAmelCase : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Any = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
A_ : Union[str, Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
A_ : Any = False
A_ : List[Any] = False
A_ : List[str] = False
A_ : int = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFViTMAEModelTester(self )
__lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : int = [*signature.parameters.keys()]
__lowerCAmelCase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__lowerCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
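        # ViTMAE derives its random patch mask from this `noise` tensor (patches with the
        # smallest noise are kept), so fixing it makes the dict and keyword calls comparable.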
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = copy.deepcopy(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = outputs_dict[0].numpy()
__lowerCAmelCase : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __lowerCamelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__lowerCAmelCase : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = v.numpy()
else:
__lowerCAmelCase : Tuple = np.array(_SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = prepare_numpy_arrays(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# make masks reproducible
np.random.seed(2 )
__lowerCAmelCase : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__lowerCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCAmelCase : Tuple = tf.constant(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowerCAmelCase : str = tf_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
# make mask reproducible
np.random.seed(2 )
__lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
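        # collect every keras-serializable *MainLayer class that backs one of the model classes under test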
__lowerCAmelCase : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_SCREAMING_SNAKE_CASE )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),)
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_SCREAMING_SNAKE_CASE , '_keras_serializable' , _SCREAMING_SNAKE_CASE )
}
__lowerCAmelCase : Any = int((config.image_size // config.patch_size) ** 2 )
__lowerCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCAmelCase : List[str] = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
__lowerCAmelCase : Dict = main_layer_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__lowerCAmelCase : Optional[int] = tf.keras.Model(_SCREAMING_SNAKE_CASE , outputs=main_layer(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , 'keras_model.h5' )
model.save(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = tf.keras.models.load_model(
_SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_SCREAMING_SNAKE_CASE , tf.keras.Model )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# make mask reproducible
np.random.seed(2 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[str] = int((config.image_size // config.patch_size) ** 2 )
__lowerCAmelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
__lowerCAmelCase : Union[str, Any] = outputs.last_hidden_state.numpy()
__lowerCAmelCase : int = 0
else:
__lowerCAmelCase : List[Any] = outputs.logits.numpy()
__lowerCAmelCase : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
__lowerCAmelCase : Optional[int] = after_outputs['last_hidden_state'].numpy()
__lowerCAmelCase : Dict = 0
else:
__lowerCAmelCase : Tuple = after_outputs['logits'].numpy()
__lowerCAmelCase : int = 0
__lowerCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
def __lowerCamelCase ( self ):
# make mask reproducible
np.random.seed(2 )
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Tuple = int((config.image_size // config.patch_size) ** 2 )
__lowerCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__lowerCAmelCase : List[str] = model_class.from_config(model.config )
__lowerCAmelCase : Any = new_model(_SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
__lowerCAmelCase : Any = new_model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowerCamelCase ( self ):
pass
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ():
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def __lowerCamelCase ( self ):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__lowerCAmelCase : Optional[Any] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
__lowerCAmelCase : Tuple = self.default_image_processor
__lowerCAmelCase : Dict = prepare_img()
__lowerCAmelCase : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowerCAmelCase : List[str] = ViTMAEConfig()
__lowerCAmelCase : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowerCAmelCase : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
__lowerCAmelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase : Any = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
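        # a device-seeded generator where supported; mps falls back to the global torch seed (assumed reason: device generators are unsupported there)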
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
from __future__ import annotations
import bisect
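# hand-rolled counterparts to the stdlib bisect helpers, followed by iterative, stdlib-backed, and recursive binary searches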
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
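    # lower bound: first index where item could be inserted while keeping the collection sorted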
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
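    # upper bound: last index where item could be inserted while keeping the collection sorted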
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
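    # iterative binary search over the inclusive window [left, right]; returns the item's index or None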
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
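    # recursive binary search; an empty window (right < left) means the item is absent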
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( _lowerCamelCase):
@staticmethod
@abstractmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
raise NotImplementedError()
@abstractmethod
def __lowerCamelCase ( self ):
raise NotImplementedError() | 86 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
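    # answers[i] will hold the minimum number of perfect squares summing to i (at most 4, by Lagrange's four-square theorem)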
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCamelCase__ = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE = 14 ):
if group not in primes:
raise ValueError('Unsupported Group' )
__lowerCAmelCase : Dict = primes[group]['prime']
__lowerCAmelCase : Any = primes[group]['generator']
__lowerCAmelCase : Dict = int(hexlify(urandom(32 ) ) , base=16 )
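        # private key: 32 bytes from the OS CSPRNG, interpreted as a 256-bit integer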
def __lowerCamelCase ( self ):
return hex(self.__private_key )[2:]
def __lowerCamelCase ( self ):
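        # public key: generator ** private_key mod prime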
__lowerCAmelCase : str = pow(self.generator , self.__private_key , self.prime )
return hex(_SCREAMING_SNAKE_CASE )[2:]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime ) == 1
)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = int(_SCREAMING_SNAKE_CASE , base=16 )
if not self.is_valid_public_key(_SCREAMING_SNAKE_CASE ):
raise ValueError('Invalid public key' )
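        # shared secret: remote_public_key ** private_key mod prime, then hashed to a fixed-length digest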
__lowerCAmelCase : Dict = pow(_SCREAMING_SNAKE_CASE , self.__private_key , self.prime )
return shaaaa(str(_SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_SCREAMING_SNAKE_CASE , (prime - 1) // 2 , _SCREAMING_SNAKE_CASE ) == 1
)
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 14 ):
__lowerCAmelCase : Optional[int] = int(_SCREAMING_SNAKE_CASE , base=16 )
__lowerCAmelCase : List[Any] = int(_SCREAMING_SNAKE_CASE , base=16 )
__lowerCAmelCase : Tuple = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Invalid public key' )
__lowerCAmelCase : Tuple = pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return shaaaa(str(_SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
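        # decode all but the last token in one pass through the pre-allocated cache, then feed the final token separately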
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : Optional[Any] = seq_length
__lowerCAmelCase : Union[str, Any] = is_training
__lowerCAmelCase : Dict = use_input_mask
__lowerCAmelCase : Tuple = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : int = use_labels
__lowerCAmelCase : List[str] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
__lowerCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self ):
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : str = self.prepare_config_and_inputs()
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Union[str, Any] = BertGenerationEncoder(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Union[str, Any] = BertGenerationEncoder(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[int] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Dict = True
__lowerCAmelCase : str = BertGenerationDecoder(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).eval()
# first forward pass
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
__lowerCAmelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )['hidden_states'][0]
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , encoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , )['hidden_states'][0]
# select random slice
__lowerCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Tuple = BertGenerationDecoder(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
A_ : Any = (BertGenerationDecoder,) if is_torch_available() else ()
A_ : Any = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = BertGenerationEncoderTester(self )
__lowerCAmelCase : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
__lowerCAmelCase : str = 'bert'
self.model_tester.create_and_check_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCAmelCase : Dict = None
self.model_tester.create_and_check_model_as_decoder(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_torch
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__lowerCAmelCase : Dict = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Union[str, Any] = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@require_torch
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
__lowerCAmelCase : str = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Optional[int] = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) | 86 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
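        # call the model both with a dict of named inputs and with a positional list, to cover both TF input styles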
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
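            # round-trip through the TF SavedModel format and reload with plain Keras to check the graph outputs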
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
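            # an encoder-decoder model returns encoder/decoder outputs in pairs, so the total length should be even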
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
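            # ConvBERT's grouped attention halves the effective number of heads, hence num_attention_heads / 2 below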
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics # these print nicely
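# example CLI call (hypothetical file names): python rouge_cli.py preds.txt refs.txt --save_path rouge.json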
if __name__ == "__main__":
fire.Fire(calculate_rouge_path) | 86 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
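        # mirror the processor's resizing rule: scale the shorter side to size["shortest_edge"] while preserving the aspect ratio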
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
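            # batched inputs are padded, so the expected size is the largest height and width in the batch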
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda item : item[0] )[0]
            __lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase__ = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCAmelCase__ = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase__ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase__ = sorted(arg_to_scheduler.keys())
UpperCAmelCase__ = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowercase_ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : str , __UpperCAmelCase : argparse.Namespace , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]="base" , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[int] , ) ->Tuple:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
a = 0
a = Path(self.hparams.output_dir )
a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
a = config
a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
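        # copy any dropout/layerdrop overrides from the CLI hparams onto the model config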
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
a = tokenizer
a = MODEL_MODES[mode]
if model is None:
a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
a = model
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : str ) ->str:
"""simple docstring"""
a = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = arg_to_scheduler[self.hparams.lr_scheduler]
a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
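        # "interval": "step" makes the scheduler update the learning rate every optimizer step instead of once per epoch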
a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.model
a = ['''bias''', '''LayerNorm.weight''']
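        # standard transformer fine-tuning: apply no weight decay to biases and LayerNorm weights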
a = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
a = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
a = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a = optimizer
a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) ->Dict:
"""simple docstring"""
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] ) ->Any:
"""simple docstring"""
return self.validation_end(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
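        # effective batch size = per-device batch size * gradient accumulation steps * number of devices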
a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Dict ) ->Optional[int]:
"""simple docstring"""
if stage == "test":
a = len(self.test_dataloader().dataset )
else:
a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
a = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : bool = False ) ->str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return self.train_loader
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict[str, Any] ) ->None:
"""simple docstring"""
a = self.output_dir.joinpath('''best_tfmr''' )
a = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : Dict , __UpperCAmelCase : int ) ->int:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=__UpperCAmelCase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(__UpperCAmelCase ).parent / '''test_run''' / '''cache''' ) , type=__UpperCAmelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=__UpperCAmelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=__UpperCAmelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=__UpperCAmelCase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=__UpperCAmelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=__UpperCAmelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=__UpperCAmelCase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__UpperCAmelCase )
parser.add_argument('''--train_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) ->int:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # initialize the retriever only on the master worker with RAY; in new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) ->int:
"""simple docstring"""
a = trainer.lr_schedulers[0]['''scheduler''']
a = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Union[str, Any]:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
a = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Optional[int]:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
a = trainer.callback_metrics
# Log and save results to file
a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(__UpperCAmelCase , '''w''' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def _a ( a :Union[str, Any] , a :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=a )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=a , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=a , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a ( a :BaseTransformer , a :argparse.Namespace , a :Tuple=None , a :Any=True , a :List[str]=[] , a :List[Any]=None , a :Union[str, Any]=None , **a :Optional[Any] , ) -> List[str]:
pl.seed_everything(args.seed )
# init model
a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a )
# add custom checkpoints
if checkpoint_callback is None:
a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a )
if logging_callback is None:
a = LoggingCallback()
a = {}
    if args.fp16:
a = 16
if args.gpus > 1:
a = '''auto'''
a = '''ddp'''
a = args.accumulate_grad_batches
a = None
a = '''auto'''
a = pl.Trainer.from_argparse_args(
a , weights_summary=a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a , val_check_interval=1 , num_sanity_val_steps=2 , **a , )
if args.do_train:
trainer.fit(a )
else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
| 0 |
"""simple docstring"""
import numpy as np
def sigmoid(vector ):
    return 1 / (1 + np.exp(-vector ))
def swish(vector ):
    return vector * sigmoid(vector )
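# quick sanity check (illustrative values, not from the original file):
#   sigmoid(np.array([0.0, 2.0])) -> approx [0.5, 0.8808]
#   swish(np.array([0.0, 2.0]))   -> approx [0.0, 1.7616]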
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = RobertaPreLayerNormConfig.from_pretrained(
snake_case_ , architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
UpperCAmelCase_ = torch.load(hf_hub_download(repo_id=snake_case_ , filename="pytorch_model.bin" ) )
UpperCAmelCase_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
UpperCAmelCase_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
UpperCAmelCase_ = tensor_value
UpperCAmelCase_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=snake_case_ , config=snake_case_ , state_dict=snake_case_ )
model.save_pretrained(snake_case_ )
# convert tokenizer
UpperCAmelCase_ = AutoTokenizer.from_pretrained(snake_case_ )
tokenizer.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
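        # exercise all three call signatures: with attention mask and token types, with token types only, and with input ids alone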
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
            __lowerCAmelCase,
        ) : Optional[Any] = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% relative difference on a value of 10e8
        # yields an absolute difference of ~1, so comparing with an absolute tolerance is unreliable here.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
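        # Illustration with hypothetical numbers: for an expected value of 1.0e8 and a model output of
        # 1.00000001e8, the absolute difference is ~1 even though the relative error is only 1e-8; the
        # ratio 1.00000001e8 / 1.0e8 ~= 1.00000001 stays safely inside 1 +/- TOLERANCE with TOLERANCE = 1e-3.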
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : Dict = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['DeiTFeatureExtractor']
lowerCamelCase : Optional[int] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 |
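The row above registers DeiT's public names in an import-structure dict and hands them to _LazyModule, so heavy submodules are only imported on first attribute access. A minimal, self-contained sketch of that idea (a simplified stand-in, not the actual _LazyModule implementation; the loader callables are hypothetical):

import types

class LazyModuleSketch(types.ModuleType):
    # Defers resolving exported names until they are first accessed.
    def __init__(self, name, import_structure, loaders):
        super().__init__(name)
        # Map each exported name back to the submodule key that defines it.
        self._name_to_key = {v: k for k, vals in import_structure.items() for v in vals}
        self._loaders = loaders  # stand-in for importlib.import_module

    def __getattr__(self, attr):
        value = self._loaders[self._name_to_key[attr]]()[attr]
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value

demo = LazyModuleSketch(
    "deit_demo",
    {"configuration_deit": ["DeiTConfig"]},
    {"configuration_deit": lambda: {"DeiTConfig": object}},  # hypothetical loader
)
print(demo.DeiTConfig)  # resolved lazily on first access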
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 0 |
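The tokenajson method above converts a Donut-style token stream with paired <s_key> / </s_key> markers into nested JSON. A minimal, self-contained sketch of the regex extraction it performs for a single key (the token string below is hypothetical and not tied to any real tokenizer):

import re

tokens = "<s_menu><s_name>latte</s_name></s_menu>"  # hypothetical Donut-style output
start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
key = start_token.group(1)  # "menu"
end_token = re.search(rf"</s_{re.escape(key)}>", tokens, re.IGNORECASE)
content = re.search(
    f"{re.escape(start_token.group())}(.*?){re.escape(end_token.group())}",
    tokens,
    re.IGNORECASE,
)
print(key, "->", content.group(1))  # menu -> <s_name>latte</s_name>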
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : str = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[Any] = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Tuple = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Optional[int] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = '''imagenet-1k-id2label.json'''
A : str = 1000
A : Tuple = '''huggingface/label-files'''
A : List[Any] = num_labels
A : List[Any] = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='''dataset''' ) ) , '''r''' ) )
A : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
A : int = idalabel
A : Any = {v: k for k, v in idalabel.items()}
A : List[str] = CvtConfig(num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
A : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
A : Optional[Any] = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
A : Optional[Any] = [2, 2, 20]
A : int = [3, 12, 16]
A : str = [192, 768, 1024]
A : Union[str, Any] = CvtForImageClassification(snake_case__ )
A : Optional[int] = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
A : List[str] = image_size
A : Dict = torch.load(snake_case__ , map_location=torch.device('''cpu''' ) )
A : int = OrderedDict()
A : Optional[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
A : Optional[int] = list_of_state_dict + cls_token(snake_case__ )
A : List[Any] = list_of_state_dict + embeddings(snake_case__ )
for cnt in range(config.depth[idx] ):
A : Dict = list_of_state_dict + attention(snake_case__ , snake_case__ )
A : Optional[int] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case__ )
for i in range(len(snake_case__ ) ):
A : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_84,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 |
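All of the helpers above simply accumulate (huggingface_key, original_key) pairs; the conversion then copies each tensor out of the source checkpoint under its new name. The same mechanism in miniature (keys and tensor below are hypothetical):

from collections import OrderedDict

import torch

rename_pairs = [("cvt.head.weight", "head.weight")]  # (hf_key, original_key)
original_weights = {"head.weight": torch.zeros(2, 3)}  # hypothetical source checkpoint
new_state_dict = OrderedDict(
    (hf_key, original_weights[orig_key]) for hf_key, orig_key in rename_pairs
)
print(list(new_state_dict))  # ['cvt.head.weight']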
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 0 |
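For reference, the row above is Project Euler problem 65: sum the digits of the numerator of the 100th convergent of e, whose continued fraction is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. A de-obfuscated sketch of the same recurrence h_i = a_i * h_(i-1) + h_(i-2), with readable names chosen here for clarity:

def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum

def solution(max_n: int = 100) -> int:
    pre_numerator, cur_numerator = 1, 2  # seed numerators of the convergents of e
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1  # i-th partial quotient of e
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)

print(solution())  # 272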
'''simple docstring'''
from __future__ import annotations
import math
def a_ ( lowerCamelCase : float , lowerCamelCase : int ):
lowerCAmelCase = u
for i in range(1 , lowerCamelCase ):
lowerCAmelCase = temp * (u - i)
return temp
def a_ ( ):
    lowerCAmelCase = int(input('enter the number of values: ' ) )
lowerCAmelCase = []
for _ in range(lowerCamelCase ):
y.append([] )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
y[i].append(lowerCamelCase )
lowerCAmelCase = 0
print('enter the values of parameters in a list: ' )
lowerCAmelCase = list(map(lowerCamelCase , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(lowerCamelCase ):
lowerCAmelCase = float(input() )
lowerCAmelCase = int(input('enter the value to interpolate: ' ) )
lowerCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowerCamelCase ):
for j in range(n - i ):
lowerCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
lowerCAmelCase = y[0][0]
for i in range(1 , lowerCamelCase ):
summ += (ucal(lowerCamelCase , lowerCamelCase ) * y[0][i]) / math.factorial(lowerCamelCase )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 4 |
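The row above reads everything interactively; a non-interactive sketch of the same Newton forward-difference interpolation is easier to sanity-check (the sample data below is hypothetical, f(x) = x**2 + 1 on equally spaced nodes):

import math

x = [0.0, 1.0, 2.0, 3.0]
y = [1.0, 2.0, 5.0, 10.0]  # f(x) = x**2 + 1 sampled on the nodes above
n = len(x)

# Build the forward-difference table: row i holds the i-th differences.
table = [y[:]] + [[0.0] * n for _ in range(n - 1)]
for i in range(1, n):
    for j in range(n - i):
        table[i][j] = table[i - 1][j + 1] - table[i - 1][j]

value = 1.5
u = (value - x[0]) / (x[1] - x[0])
summ, term = table[0][0], 1.0
for i in range(1, n):
    term *= u - (i - 1)  # u * (u - 1) * ... * (u - i + 1)
    summ += term * table[i][0] / math.factorial(i)
print(summ)  # 3.25 == 1.5 ** 2 + 1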
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
_lowercase =os.path.dirname(os.path.realpath(__snake_case ) )
_lowercase =os.path.join(__snake_case , '''words.txt''' )
_lowercase =''''''
with open(__snake_case ) as f:
_lowercase =f.readline()
_lowercase =[word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
_lowercase =[
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
| 5 |
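A quick worked check of the letter-value rule used above: "SKY" scores 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" is a triangle word.

print(sum(ord(ch) - 64 for ch in "SKY"))  # 55
print(55 in [n * (n + 1) // 2 for n in range(1, 101)])  # True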
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 0 |
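A typical invocation of the conversion script above, matching the argparse flags it defines (the script and file names below are hypothetical):

# python convert_wavlm_checkpoint.py \
#     --checkpoint_path ./WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted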
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A : Dict = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = (KDPMaDiscreteScheduler,)
lowerCamelCase = 10
def snake_case__ ( self : Dict,**lowercase_ : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase_ )
return config
def snake_case__ ( self : List[str] )-> Union[str, Any]:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001],[0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_,beta_end=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type='v_prediction' )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter * scheduler.init_noise_sigma
A__ = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
if torch_device == "mps":
return
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps,device=lowercase_ )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.to(lowercase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A__ = scheduler.scale_model_input(lowercase_,lowercase_ )
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ )
A__ = output.prev_sample
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
if str(lowercase_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 7 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 0 |
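The calculate_heuristic method above is the Manhattan distance, an admissible lower bound on the remaining steps when movement is limited to the four directions in delta. A standalone check with hypothetical coordinates:

def manhattan(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> int:
    return abs(pos_x - goal_x) + abs(pos_y - goal_y)

print(manhattan(0, 0, 6, 6))  # 12: at least 12 unit moves separate the two cells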
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase_ = '''
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class snake_case_ ( __A ):
'''simple docstring'''
@staticmethod
def snake_case__( _UpperCamelCase : ArgumentParser ) ->Union[str, Any]:
snake_case_ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_UpperCamelCase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_UpperCamelCase )
def __init__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , *_UpperCamelCase : Any , ) ->str:
snake_case_ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f'''Loading model {model_type}''' )
snake_case_ = model_type
snake_case_ = tf_checkpoint
snake_case_ = pytorch_dump_output
snake_case_ = config
snake_case_ = finetuning_task_name
def snake_case__( self : Tuple ) ->Optional[Any]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_UpperCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
if "ckpt" in self._tf_checkpoint.lower():
snake_case_ = self._tf_checkpoint
snake_case_ = ''''''
else:
snake_case_ = self._tf_checkpoint
snake_case_ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_UpperCamelCase , self._config , self._pytorch_dump_output , _UpperCamelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_UpperCamelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]''' ) | 8 |
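A typical command line for the converter above, using the flags registered in register_subcommand (the paths below are hypothetical):

# transformers-cli convert --model_type bert \
#     --tf_checkpoint ./bert_model.ckpt \
#     --config ./bert_config.json \
#     --pytorch_dump_output ./pytorch_model.bin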
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
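A worked check of the ideal-gas helpers above, which all rearrange PV = nRT with R = 0.0821 L*atm/(mol*K) (the numbers are hypothetical): 2 mol at 300 K in a 10 L vessel gives P = 2 * 0.0821 * 300 / 10 = 4.926 atm, which the pressure helper rounds to 5.

print(2 * 0.0821 * 300 / 10)  # 4.926 atm
print(round(2 * 0.0821 * 300 / 10))  # 5, matching the round() in the helpers above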
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ['''flax''', '''transformers''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :List[Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Union[str, Any] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :str ) -> Any:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''flax''', '''transformers''']
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[Any] ) -> Optional[int]:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Any , *lowerCAmelCase__ :str , **lowerCAmelCase__ :str ) -> Dict:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :str , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[str] ) -> Tuple:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ['''flax''', '''transformers''']
def __init__( self :Any , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :List[str] ) -> int:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Optional[Any] , *lowerCAmelCase__ :str , **lowerCAmelCase__ :int ) -> Optional[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Dict , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Tuple ) -> Dict:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self :Dict , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :List[str] ) -> Tuple:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Tuple , *lowerCAmelCase__ :int , **lowerCAmelCase__ :int ) -> List[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[str] ) -> str:
requires_backends(cls , ['''flax''', '''transformers'''] )
| 9 |
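The classes above are import-time placeholders: importing them always succeeds, but constructing one raises a helpful error when Flax or transformers is missing. A simplified sketch of the guard (the availability check below is a hypothetical stand-in for the real helper in diffusers.utils):

def requires_backends_sketch(obj, backends):
    available = {"torch"}  # hypothetical: pretend only torch is installed
    missing = [b for b in backends if b not in available]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {', '.join(missing)}")

class FlaxPipelinePlaceholder:  # stand-in for the dummy classes above
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["flax", "transformers"])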
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""")
    print(F"""The numpy value of pi is {pi}""")
    print(F"""The total error is {abs(pi - pi_estimate)}""")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(F"""Estimated value is {estimated_value}""")
    print(F"""Expected value is {expected_value}""")
    print(F"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
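# Hedged usage sketch (added; the iteration counts are illustrative only --
# larger counts tighten the Monte Carlo estimates at the cost of runtime):
#
#     pi_estimator(100_000)
#     area_under_line_estimator_check(100_000)
#     pi_estimator_using_area_under_curve(100_000)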
| 10 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
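# Hedged usage sketch (added for illustration; `_demo_binary_search` and its
# data are assumptions, not from the original file):
def _demo_binary_search() -> None:
    data = [0, 5, 7, 10, 15]
    print(binary_search(data, 7))                                 # 2
    print(binary_search_std_lib(data, 7))                         # 2
    print(binary_search_by_recursion(data, 7, 0, len(data) - 1))  # 2
    insort_left(data, 6)
    print(data)  # [0, 5, 6, 7, 10, 15]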
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = sorted(int(item) for item in user_input.split(""","""))
    target = int(input("""Enter a single number to be found in the list:\n"""))
    result = binary_search(collection, target)
    if result is None:
        print(f'{target} was not found in {collection}.')
    else:
        print(f'{target} was found at position {result} in {collection}.') | 86 | 0 |
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
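# Hedged usage sketch (added for illustration; `_demo_is_prime` is not part of
# the original file):
def _demo_is_prime() -> None:
    print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]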
class Test(unittest.TestCase):
    '''simple docstring'''
    def test_primes(self) -> None:
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(1_1))
self.assertTrue(is_prime(1_3))
self.assertTrue(is_prime(1_7))
self.assertTrue(is_prime(1_9))
self.assertTrue(is_prime(2_3))
self.assertTrue(is_prime(2_9))
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
is_prime(-1_9)
self.assertFalse(
is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 11 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 0 |
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
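# Hedged usage sketch (added for illustration; the demo name and data are
# assumptions): range sums in O(1) after the O(n) prefix pass, plus the
# running-sum set trick for subarray-sum lookup.
def _demo_prefix_sum() -> None:
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))    # 2 + 3 + 4 = 9
    print(ps.contains_sum(5))  # True: subarray [2, 3] sums to 5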
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
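# Hedged sketch of the helper above (added; values are illustrative): it maps
# a requested image size to the latent grid size, rounding up so that sizes
# which are not multiples of scale_factor**2 still fit:
#
#     get_new_h_w(512, 512, 8)  # -> (64, 64)
#     get_new_h_w(500, 500, 8)  # -> (64, 64), 500 rounds up to the next block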
class KandinskyPipeline(DiffusionPipeline):
    def __init__( self , text_encoder: MultilingualCLIP , tokenizer: XLMRobertaTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # reduce TensorFlow console logging
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 13 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCamelCase ( self ):
super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """simple docstring"""
    if len(string1) != len(string2):
        raise ValueError('''String lengths must match!''')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
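# Hedged usage sketch (added for illustration; sample strings are assumptions):
def _demo_hamming_distance() -> None:
    print(hamming_distance("karolin", "kathrin"))  # 3 differing positions
    print(hamming_distance("0000", "1111"))        # 4 differing positions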
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3 | 86 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups)
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
def UpperCamelCase_ ( self : str ):
__A = SqueezeBertModelTester(self )
__A = ConfigTester(self ,config_class=A ,dim=37 )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A )
def UpperCamelCase_ ( self : List[str] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = SqueezeBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : int ):
__A = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
__A = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__A = model(A )[0]
__A = torch.Size((1, 3) )
self.assertEqual(output.shape ,A )
__A = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(A ,A ,atol=1E-4 ) )
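# For reference, a minimal sketch of reproducing the integration check above by
# hand, assuming the public "squeezebert/squeezebert-mnli" checkpoint (input ids
# copied from the slow test):
#
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#   logits = model(torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]]))[0]  # -> shape (1, 3)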
| 15 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
        __lowerCAmelCase : List[str] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
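# For orientation, a condensed sketch of the class-conditional sampling flow the
# slow tests below exercise (assumes a CUDA device and the public
# 'facebook/DiT-XL-2-256' checkpoint):
#
#   pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256').to('cuda')
#   ids = pipe.get_label_ids(['white shark'])  # ImageNet class names -> label ids
#   image = pipe(ids, num_inference_steps=25, output_type='np').images[0]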
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
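# At runtime the lazy module defers the heavy imports above until first
# attribute access. A usage sketch, assuming torch and sentencepiece are
# installed and the 'microsoft/speecht5_tts' checkpoint is reachable:
#
#   from transformers.models.speecht5 import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')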
| 16 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
    A_ : str = ShapEImg2ImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
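# Note on the test above: inputs listed in `batch_params` are replicated and the
# outputs multiplied, so `batch_size * num_images_per_prompt` frame stacks come
# back. A sketch of the equivalent direct call (image tensor assumed prepared):
#
#   images = pipe(image, num_images_per_prompt=2, frame_size=32, output_type='np').images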
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
        __lowerCAmelCase : Union[str, Any] = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx"
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ):
__lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) )
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowercase ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : int ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : str ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
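# The scheduler-swapping pattern exercised above generalizes to any of the
# imported schedulers; a sketch, assuming the ONNX checkpoint is cached locally:
#
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
#   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)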
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _lowercase ( self : Dict ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self : str ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
__lowercase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" )
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 17 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
__lowerCamelCase : int = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str ):
"""simple docstring"""
    return torch.atan2(lowerCAmelCase , lowerCAmelCase ) / math.pi * 2
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE_ : Optional[int] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCAmelCase , lowerCAmelCase )
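# Worked example of the crash schedule above: at t = 0.5, sigma = sin(pi/4)**2 = 0.5
# and alpha = (1 - 0.25)**0.5 ~ 0.8660, so atan2(0.5, 0.8660) = pi/6 and the
# function returns (pi/6) / pi * 2 = 1/3 exactly.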
class a__ ( A__ ):
pass
class a__ ( nn.Module ):
def __init__( self : Any,_A : List[Any] ):
"""simple docstring"""
super().__init__()
        SCREAMING_SNAKE_CASE_ : int = DiffusionAttnUnet1D(_A,n_attn_layers=4 )
SCREAMING_SNAKE_CASE_ : List[Any] = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE_ : int = torch.quasirandom.SobolEngine(1,scramble=_A )
def _snake_case ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = MODELS_MAP[model_name]["url"]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
__lowerCamelCase : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
__lowerCamelCase : int = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
__lowerCamelCase : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
__lowerCamelCase : Optional[int] = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
__lowerCamelCase : Tuple = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
__lowerCamelCase : Any = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def _snake_case ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(lowerCAmelCase ) and not isinstance(lowerCAmelCase , lowerCAmelCase ):
return name.replace(lowerCAmelCase , lowerCAmelCase )
elif name.startswith(lowerCAmelCase ):
return [name.replace(lowerCAmelCase , lowerCAmelCase ) for v in value]
raise ValueError(f'Attn error with {name}' )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str=1_3 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
SCREAMING_SNAKE_CASE_ : Tuple = 0
if string.startswith("net.3." ):
depth += 1
SCREAMING_SNAKE_CASE_ : Dict = string[6:]
elif string.startswith("net." ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = string[4:]
while string.startswith("main.7." ):
depth += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = string[7:]
if string.startswith("main." ):
SCREAMING_SNAKE_CASE_ : Tuple = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE_ : Any = string[:2]
SCREAMING_SNAKE_CASE_ : Optional[int] = string[2:]
else:
SCREAMING_SNAKE_CASE_ : int = string[0]
SCREAMING_SNAKE_CASE_ : Dict = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE_ : List[str] = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Tuple = "mid_block"
elif depth > 0 and int(lowerCAmelCase ) < 7:
SCREAMING_SNAKE_CASE_ : Dict = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCAmelCase ) > 7:
SCREAMING_SNAKE_CASE_ : List[Any] = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
SCREAMING_SNAKE_CASE_ : Tuple = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : int = f'up_blocks.{max_depth - 1}' if int(lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
SCREAMING_SNAKE_CASE_ : List[str] = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE_ : str = convert_resconv_naming(lowerCAmelCase )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE_ : str = convert_attn_naming(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = new_string_left
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = prefix + "." + new_layer + "." + string_left
else:
SCREAMING_SNAKE_CASE_ : List[str] = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
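# Hand-traced examples of the renaming above: "timestep_embed.weight" maps to
# "time_proj.weight", and keys under "net.3." descend one U-Net depth level per
# "main.7." prefix before being routed into down_blocks/up_blocks/mid_block via
# the layer tables defined earlier.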
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers, don't have trainable weights
continue
SCREAMING_SNAKE_CASE_ : List[Any] = rename(lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = transform_conv_attns(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = v
return new_state_dict
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if len(lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE_ : str = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE_ : Optional[int] = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE_ : List[Any] = v.shape[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE_ : int = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _snake_case ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE_ : List[str] = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
SCREAMING_SNAKE_CASE_ : str = download(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = MODELS_MAP[model_name]["sample_rate"]
SCREAMING_SNAKE_CASE_ : List[Any] = MODELS_MAP[model_name]["sample_size"]
SCREAMING_SNAKE_CASE_ : str = Object()
SCREAMING_SNAKE_CASE_ : Any = sample_size
SCREAMING_SNAKE_CASE_ : Dict = sample_rate
SCREAMING_SNAKE_CASE_ : Any = 0
    SCREAMING_SNAKE_CASE_ : Any = UNet1DModel(sample_size=lowerCAmelCase , sample_rate=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = diffusers_model.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = DiffusionUncond(lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase )["state_dict"] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = orig_model.diffusion_ema.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = orig_model.state_dict()
SCREAMING_SNAKE_CASE_ : int = rename_orig_weights(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
SCREAMING_SNAKE_CASE_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCAmelCase ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("kernel" ) for k in list(lowerCAmelCase ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
SCREAMING_SNAKE_CASE_ : int = value.squeeze()
SCREAMING_SNAKE_CASE_ : Optional[int] = value
diffusers_model.load_state_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = 1_0_0
SCREAMING_SNAKE_CASE_ : List[Any] = 3_3
SCREAMING_SNAKE_CASE_ : Union[str, Any] = IPNDMScheduler(num_train_timesteps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase )[:-1]
SCREAMING_SNAKE_CASE_ : Dict = get_crash_schedule(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = DanceDiffusionPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(3_3 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(num_inference_steps=lowerCAmelCase , generator=lowerCAmelCase ).audios
SCREAMING_SNAKE_CASE_ : Optional[int] = sampling.iplms_sample(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {} )
SCREAMING_SNAKE_CASE_ : List[str] = generated.clamp(-1 , 1 )
SCREAMING_SNAKE_CASE_ : str = (generated - audio).abs().sum()
SCREAMING_SNAKE_CASE_ : Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , lowerCAmelCase )
print("Diff max" , lowerCAmelCase )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCamelCase : Union[str, Any] = parser.parse_args()
main(args)
| 18 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
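# The recurrence computed above is answers[i] = 1 + min(answers[i - j*j]) over all
# squares j*j <= i. Worked example: answers[26] = 1 + answers[26 - 5**2] = 2,
# matching the decomposition 26 = 25 + 1.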
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = PhobertTokenizer
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
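        # "@@" marks a subword piece that is continued by the next token, the
        # subword-nmt BPE convention that PhoBERT inherits.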
lowerCamelCase_ = ["T@@", "i", "I", "R@@", "r", "e@@"]
lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "l à</w>"]
lowerCamelCase_ = {"unk_token": "<unk>"}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase ) )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = "Tôi là VinAI Research"
lowerCamelCase_ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = "Tôi là VinAI Research"
lowerCamelCase_ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
lowerCamelCase_ = tokenizer.tokenize(lowercase )
print(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
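# Outside the fixture above, the published checkpoint can be exercised the same
# way (a sketch, assuming Hub access to "vinai/phobert-base"):
#
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Tôi là VinAI Research"))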
| 19 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
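        # The check below primes the key/value cache on every token except the
        # last, decodes the final token against that cache, and compares the
        # result with a single full forward pass over the whole sequence.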
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
        __lowerCAmelCase : Tuple = GPT2Tokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
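    # Note: jax.jit re-traces `model.generate` for each new input shape, which is
    # why the tokenizer call above pads the batch to a common length before the
    # jitted generate is invoked.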
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
                __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.float32 )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
                __lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.float32 )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
    ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(i , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : Tuple = _distribute_shards(**SCREAMING_SNAKE_CASE__ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : Optional[int] = _split_gen_kwargs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
_number_of_shards_in_gen_kwargs(SCREAMING_SNAKE_CASE__ )
else:
lowercase : str = _number_of_shards_in_gen_kwargs(SCREAMING_SNAKE_CASE__ )
assert out == expected
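# Worked example of the contract under test: _distribute_shards(num_shards=10,
# max_num_jobs=3) hands out contiguous, near-equal ranges -- here
# [range(0, 4), range(4, 7), range(7, 10)], with the remainder going to the
# earliest jobs.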
| 20 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
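        # Note: every argument passed to this tester is ignored; the values below
        # are hard-coded so the TF tests always exercise the same small ConvBERT
        # configuration.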
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 0 |
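# Usage sketch of the SavedModel round trip the @slow test above exercises
# (illustrative only; assumes the YituTech/conv-bert-base checkpoint can be
# downloaded, and that the reloaded object accepts the same dict of tensors
# the Keras export was traced with).
import os
import tempfile

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
with tempfile.TemporaryDirectory() as tmpdirname:
    model.save_pretrained(tmpdirname, saved_model=True)
    saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
    reloaded = tf.keras.models.load_model(saved_model_dir)
    outputs = reloaded({'input_ids': tf.constant([[0, 1, 2, 3, 4, 5]])})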
def naive_cut_rod_recursive(n: int, prices: list):
    # Exhaustive recursion: tries every position of the first cut, O(2**n) calls.
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    # Memoized wrapper around the recursion: O(n**2) time, O(n) space.
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    # Iterative table fill: O(n**2) time, O(n) space.
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f'n must be greater than or equal to 0. Got n = {n}')
    if n > len(prices):
        raise ValueError(
            'Each integral piece of rod must have a corresponding price. '
            f'Got n = {n} but length of prices = {len(prices)}'
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
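# Cross-check with the classic CLRS price table (illustrative, runs on
# import): for a rod of length 8 the optimum is 22, from cutting into
# lengths 2 and 6 (5 + 17), and all three implementations must agree.
clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
assert bottom_up_cut_rod(8, clrs_prices) == 22
assert top_down_cut_rod(8, clrs_prices) == naive_cut_rod_recursive(8, clrs_prices) == 22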
| 21 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 0 |
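# A minimal sketch (not the DetrImageProcessor code) of the aspect-preserving
# resize rule that get_expected_values mirrors above: the shorter side is
# scaled to size['shortest_edge'] and the longer side follows proportionally
# (the longest_edge cap is deliberately out of play in these tests).
def expected_resize(height: int, width: int, shortest_edge: int = 18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert expected_resize(400, 200) == (36, 18)  # portrait: width becomes 18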
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = 'timm_backbone'

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
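# Usage sketch: pointing the config at a timm checkpoint ('resnet50' is just
# an illustrative timm model name, not something this file prescribes).
example_config = TimmBackboneConfig(backbone='resnet50', out_indices=(1, 2, 3, 4))
assert example_config.use_timm_backbone and example_config.num_channels == 3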
| 22 |
"""simple docstring"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Element-wise logistic function: 1 / (1 + exp(-x)).
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # Swish / SiLU activation: x * sigmoid(x).
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 | 0 |
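# Spot check of the activations above (illustrative): sigmoid(0) = 0.5, so
# swish(0) = 0, and swish(2) = 2 * sigmoid(2) ≈ 1.7616.
example = swish(np.array([-1.0, 0.0, 2.0]))
# -> approximately [-0.2689, 0.0, 1.7616]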
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    # Count, for every perimeter up to max_perimeter, how many integer right
    # triangles (base, perpendicular, hypotenuse) realise it.
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    # Project Euler 39: the perimeter <= max_perimeter with the most solutions.
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(F"Perimeter {solution()} has maximum solutions")
| 23 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound) | 86 | 0 |
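# The relative-tolerance trick above in miniature (illustrative): with values
# spanning eight orders of magnitude, one absolute atol cannot fit them all,
# but the ratio expected / measured stays near 1 for any uniform relative error.
import torch

expected = torch.tensor([1.0e8, 2.0, -0.5])
measured = expected * 1.0005  # 0.05% relative error everywhere
ratio = expected / measured
assert torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3)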
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the imported script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
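# Typical invocation of this launcher (a shell sketch; run_glue.py and the
# flags after it stand in for your own training script and arguments):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
#
# The launcher imports run_glue as a module, rewrites sys.argv to append
# --tpu_num_cores 8, and passes run_glue._mp_fn to xmp.spawn.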
| 24 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        # Convert a Donut-style tag sequence into nested dicts/lists.
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 0 |
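# Illustration of tokenajson above on a Donut-style tag sequence (the tags
# are hypothetical and assumed to be in the tokenizer's added vocabulary):
#
#   '<s_menu><s_name>pizza</s_name><s_price>$10</s_price></s_menu>'
#
# parses to {'menu': {'name': 'pizza', 'price': '$10'}}: each <s_key>...</s_key>
# pair becomes a dict entry, nesting recursively, and <sep/> between leaves
# turns a value into a list.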
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}


class MgpstrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'mgp-str'

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
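# Usage sketch mirroring the defaults above (the reference checkpoint in the
# archive map is 'alibaba-damo/mgp-str-base'):
example_config = MgpstrConfig()
assert example_config.max_token_length == 27 and example_config.num_character_labels == 38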
| 25 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    # Sum of the decimal digits of ``num``.
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Project Euler 65: build the numerator of the max_n-th convergent of the
    # continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] and return
    # its digit sum.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 0 |
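# Sanity check from the Project Euler 65 statement: the numerator of the 10th
# convergent of e is 1457, whose digits sum to 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17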
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert isinstance(snake_case_,snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""",[False, True] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = tmp_path / """cache"""
_A : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : Dict = ParquetDatasetReader(snake_case_,cache_dir=snake_case_,keep_in_memory=snake_case_ ).read()
_check_parquet_dataset(snake_case_,snake_case_ )
@pytest.mark.parametrize(
"""features""",[
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
],)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = tmp_path / """cache"""
_A : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_A : Optional[int] = features.copy() if features else default_expected_features
_A : Tuple = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : Optional[int] = ParquetDatasetReader(snake_case_,features=snake_case_,cache_dir=snake_case_ ).read()
_check_parquet_dataset(snake_case_,snake_case_ )
@pytest.mark.parametrize("""split""",[None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = tmp_path / """cache"""
_A : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_A : Optional[int] = ParquetDatasetReader(snake_case_,cache_dir=snake_case_,split=snake_case_ ).read()
_check_parquet_dataset(snake_case_,snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""",[str, list] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if issubclass(snake_case_,snake_case_ ):
_A : str = parquet_path
elif issubclass(snake_case_,snake_case_ ):
_A : List[Any] = [parquet_path]
_A : Tuple = tmp_path / """cache"""
_A : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_A : str = ParquetDatasetReader(snake_case_,cache_dir=snake_case_ ).read()
_check_parquet_dataset(snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=("train",) ):
assert isinstance(snake_case_,snake_case_ )
for split in splits:
_A : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""",[False, True] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Optional[Any] = tmp_path / """cache"""
_A : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : List[str] = ParquetDatasetReader(
{"""train""": parquet_path},cache_dir=snake_case_,keep_in_memory=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_,snake_case_ )
@pytest.mark.parametrize(
"""features""",[
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
],)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = tmp_path / """cache"""
_A : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_A : str = features.copy() if features else default_expected_features
_A : List[Any] = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : Optional[int] = ParquetDatasetReader({"""train""": parquet_path},features=snake_case_,cache_dir=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_,snake_case_ )
@pytest.mark.parametrize("""split""",[None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if split:
_A : List[str] = {split: parquet_path}
else:
_A : List[Any] = """train"""
_A : Union[str, Any] = {"""train""": parquet_path, """test""": parquet_path}
_A : str = tmp_path / """cache"""
_A : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_A : Union[str, Any] = ParquetDatasetReader(snake_case_,cache_dir=snake_case_ ).read()
_check_parquet_datasetdict(snake_case_,snake_case_,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = ParquetDatasetWriter(snake_case_,tmp_path / """foo.parquet""" )
assert writer.write() > 0
_A : List[str] = pq.ParquetFile(tmp_path / """foo.parquet""" )
_A : Dict = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = str(shared_datadir / """test_image_rgb.jpg""" )
_A : Any = {"""image""": [image_path]}
_A : Union[str, Any] = Features({"""image""": Image()} )
_A : Union[str, Any] = Dataset.from_dict(snake_case_,features=snake_case_ )
_A : str = ParquetDatasetWriter(snake_case_,tmp_path / """foo.parquet""" )
assert writer.write() > 0
_A : int = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_A : int = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ),streaming=snake_case_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""",[
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
],)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert get_writer_batch_size(snake_case_ ) == expected
| 26 |
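# A minimal round-trip sketch of the reader/writer pair exercised by the tests
# above, assuming the `datasets` library is installed (file name illustrative):
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
assert ParquetDatasetWriter(ds, "demo.parquet").write() > 0   # returns bytes written
reloaded = ParquetDatasetReader("demo.parquet").read()        # back to a Dataset
assert reloaded.column_names == ["col_1", "col_2", "col_3"]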
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( PretrainedConfig):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = tempfile.mkdtemp()
# fmt: off
__a : Tuple = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__a : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__a : Dict = {'unk_token': '<unk>'}
__a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
__a : List[str] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
__a : Optional[Any] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : str = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_tokenizer()
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Dict = self.get_image_processor()
__a : Any = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__a : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__a : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : int = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : str = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Dict = self.prepare_image_inputs()
__a : Tuple = image_processor(__a , return_tensors='np' )
__a : List[Any] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : Optional[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Optional[Any] = 'lower newer'
__a : Union[str, Any] = processor(text=__a )
__a : str = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : List[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : List[str] = 'lower newer'
__a : Optional[int] = self.prepare_image_inputs()
__a : Tuple = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Tuple = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Dict = self.prepare_image_inputs()
__a : List[str] = self.prepare_image_inputs()
__a : int = processor(images=__a , visual_prompt=__a )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_image_processor()
__a : Union[str, Any] = self.get_tokenizer()
__a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Any = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
| 27 |
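# A compact sketch of what these tests exercise end to end: the processor
# bundles a tokenizer and an image processor behind one __call__. Assumes
# `transformers` is installed and the (real) CIDAS/clipseg-rd64-refined
# checkpoint can be downloaded.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']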
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 0 |
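# Example invocation of the conversion entry point above (script and path names
# illustrative):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-hf
#
# The core of the renaming logic is the MAPPING table with "*" wildcards for
# per-layer parameters; a tiny standalone sketch of that substitution:
mapped_key = "encoder.layers.*.attention.k_proj"
fairseq_name = "encoder.layers.7.self_attn.k_proj.weight"
layer_index = fairseq_name.split("encoder.layers.")[-1].split(".")[0]
print(mapped_key.replace("*", layer_index))  # encoder.layers.7.attention.k_proj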
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowerCamelCase ( A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = XCLIPTextConfig()
# derive patch size from model name
UpperCamelCase = model_name.find('patch' )
UpperCamelCase = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCamelCase = XCLIPVisionConfig(patch_size=A__ , num_frames=A__ )
if "large" in model_name:
UpperCamelCase = 768
UpperCamelCase = 3_072
UpperCamelCase = 12
UpperCamelCase = 1_024
UpperCamelCase = 4_096
UpperCamelCase = 16
UpperCamelCase = 24
UpperCamelCase = 768
UpperCamelCase = 3_072
if model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = 336
UpperCamelCase = XCLIPConfig.from_text_vision_configs(A__ , A__ )
if "large" in model_name:
UpperCamelCase = 768
return config
def __lowerCamelCase ( A__ ) -> Tuple:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
UpperCamelCase = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCamelCase = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCamelCase = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCamelCase = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCamelCase = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCamelCase = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCamelCase = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCamelCase = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCamelCase = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCamelCase = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCamelCase = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCamelCase = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCamelCase = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCamelCase = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCamelCase = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCamelCase = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCamelCase = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCamelCase = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCamelCase = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCamelCase = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCamelCase = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCamelCase = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(A__ )
if "attn.in_proj" in key:
UpperCamelCase = key.split('.' )
if key.startswith('visual' ):
UpperCamelCase = key_split[3]
UpperCamelCase = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[
:dim
]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
elif key.startswith('mit' ):
UpperCamelCase = key_split[2]
UpperCamelCase = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = key_split[2]
UpperCamelCase = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = rename_key(A__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCamelCase = val.T
UpperCamelCase = val
return orig_state_dict
def __lowerCamelCase ( A__ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
UpperCamelCase = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCamelCase = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCamelCase = 'eating_spaghetti_32_frames.npy'
UpperCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=A__ , repo_type='dataset' , )
UpperCamelCase = np.load(A__ )
return list(A__ )
def __lowerCamelCase ( A__ , A__=None , A__=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCamelCase = model_to_url[model_name]
UpperCamelCase = 8
if "16-frames" in model_name:
UpperCamelCase = 16
elif "shot" in model_name:
UpperCamelCase = 32
UpperCamelCase = get_xclip_config(A__ , A__ )
UpperCamelCase = XCLIPModel(A__ )
model.eval()
if "drive" in checkpoint_url:
UpperCamelCase = 'pytorch_model.bin'
gdown.cached_download(A__ , A__ , quiet=A__ )
UpperCamelCase = torch.load(A__ , map_location='cpu' )['model']
else:
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ )['model']
UpperCamelCase = convert_state_dict(A__ , A__ )
UpperCamelCase = XCLIPModel(A__ )
UpperCamelCase , UpperCamelCase = model.load_state_dict(A__ , strict=A__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCamelCase = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
UpperCamelCase = VideoMAEImageProcessor(size=A__ )
UpperCamelCase = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase = XCLIPProcessor(image_processor=A__ , tokenizer=A__ )
UpperCamelCase = prepare_video(A__ )
UpperCamelCase = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=A__ , return_tensors='pt' , padding=A__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCamelCase = model(**A__ )
# Verify outputs
UpperCamelCase = outputs.logits_per_video
UpperCamelCase = logits_per_video.softmax(dim=1 )
print('Probs:' , A__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(A__ , A__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(A__ , organization='nielsr' )
processor.push_to_hub(A__ , organization='nielsr' )
slow_tokenizer.push_to_hub(A__ , organization='nielsr' )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 |
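# The in_proj slicing in convert_state_dict above is a standard conversion
# trick: CLIP-style attention stores Q, K and V as one fused (3 * dim, dim)
# matrix, while the HF port keeps three separate projections. A self-contained
# sketch of the split (and a check that nothing is lost):
import torch

dim = 4
fused_weight = torch.randn(3 * dim, dim)  # rows 0:dim -> Q, dim:2*dim -> K, rest -> V
q_w = fused_weight[:dim, :]
k_w = fused_weight[dim : dim * 2, :]
v_w = fused_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)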
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 0 |
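# The metric_definitions regexes above are what SageMaker applies to the raw
# training logs; a quick local check of the PyTorch-side pattern on a
# synthetic log line:
import re

log_line = "eval_accuracy = 0.8432"
match = re.search(r"eval_accuracy.*=\D*(.*?)$", log_line)
assert match is not None and match.group(1) == "0.8432"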
def partition(m: int) -> int:
    '''simple docstring'''
    # memo[n][k] accumulates the number of ways to partition n using parts of
    # bounded size; memo[m][m - 1] is p(m), the partition count of m.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 |
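# Quick sanity check of the partition counter above: p(n) is the number of
# ways to write n as a sum of positive integers, ignoring order.
assert partition(5) == 7   # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert partition(7) == 15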
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 0 |
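# A compact, runnable restatement of the search above: greedy best-first always
# expands the frontier node with the smallest heuristic h (Manhattan distance
# here), ignoring the accumulated path cost g that A* would add. Sketch only;
# the grid, start and goal below are a small illustrative example.
import heapq

def greedy_best_first(grid, start, goal):
    h = lambda p: abs(p[0] - goal[0]) + abs(p[1] - goal[1])
    frontier, came_from = [(h(start), start)], {start: None}
    while frontier:
        _, current = heapq.heappop(frontier)
        if current == goal:
            path = []
            while current is not None:
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nxt = (current[0] + dy, current[1] + dx)
            if (0 <= nxt[0] < len(grid) and 0 <= nxt[1] < len(grid[0])
                    and grid[nxt[0]][nxt[1]] == 0 and nxt not in came_from):
                came_from[nxt] = current
                heapq.heappush(frontier, (h(nxt), nxt))
    return None

demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
print(greedy_best_first(demo, (0, 0), (2, 0)))
# [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]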
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    # Plain recursion: count the ordered sequences drawn (with repetition)
    # from ``array`` that sum to ``target``.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    # The same recursion with top-down memoisation in ``dp_array``.
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    # Bottom-up table: dp_array[i] counts the ordered sequences summing to i.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 30 |
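# Worked check for the three equivalent implementations above: with array
# [1, 2, 5] and target 5 (the __main__ values) there are nine ordered
# sequences -- 5; 1+2+2, 2+1+2, 2+2+1; the four orderings of 1+1+1+2; and
# 1+1+1+1+1 -- so the script prints 9.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9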
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
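# Worked numbers for the helpers above (results are rounded to the nearest
# integer by design, so they are rough estimates only):
print(pressure_of_gas_system(2, 300, 5))    # round(2 * 0.0821 * 300 / 5)  -> 10
print(volume_of_gas_system(2, 300, 10))     # round(2 * 0.0821 * 300 / 10) -> 5
print(temperature_of_gas_system(2, 5, 10))  # round(10 * 5 / (0.0821 * 2)) -> 305
print(normality(2, 1, 2))                   # round(2 / 1 * 2)             -> 4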
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = ViTImageProcessor if is_vision_available() else None
@property
def _A ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Optional[int] ):
_UpperCAmelCase : Any = (3, 32, 128)
_UpperCAmelCase : Any = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCAmelCase : List[str] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
_UpperCAmelCase : str = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def _A ( self : Optional[Any] , **A : List[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A )
def _A ( self : Dict , **A : List[str] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A )
def _A ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def _A ( self : str ):
_UpperCAmelCase : Union[str, Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_UpperCAmelCase : Union[str, Any] = Image.fromarray(np.moveaxis(A , 0 , -1 ) )
return image_input
def _A ( self : List[Any] ):
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A , image_processor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def _A ( self : Optional[int] ):
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : str = MgpstrProcessor(tokenizer=A , image_processor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Dict = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Any = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def _A ( self : int ):
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : str = image_processor(A , return_tensors="np" )
_UpperCAmelCase : List[Any] = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : str = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Any = "test"
_UpperCAmelCase : Tuple = processor(text=A )
_UpperCAmelCase : Union[str, Any] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self : Any ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Any = "test"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def _A ( self : List[str] ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : str = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Optional[int] = processor.char_decode(A )
_UpperCAmelCase : Tuple = tokenizer.batch_decode(A )
_UpperCAmelCase : Union[str, Any] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(A , A )
def _A ( self : Optional[int] ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Any = None
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Dict = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Optional[int] = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : str = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
_UpperCAmelCase : List[Any] = torch.randn(1 , 27 , 50257 )
_UpperCAmelCase : List[str] = torch.randn(1 , 27 , 30522 )
_UpperCAmelCase : Any = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 31 |
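# The batch_decode test above is the distinctive part of MGP-STR: the model has
# three recognition heads (character, BPE, wordpiece) and the processor decodes
# each logits tensor, keeping the best-scoring string per image. A sketch,
# assuming `transformers` is installed (checkpoint name taken from the MGP-STR
# config file earlier in this dump):
import torch
from transformers import MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
char_logits = torch.randn(1, 27, 38)     # (batch, max_token_length, char vocab)
bpe_logits = torch.randn(1, 27, 50257)   # GPT-2 BPE vocab
wp_logits = torch.randn(1, 27, 30522)    # BERT wordpiece vocab
out = processor.batch_decode([char_logits, bpe_logits, wp_logits])
print(out["generated_text"])  # best string per image across the three heads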
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__ ( Pipeline):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
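    # Worked example of the `hole` truncation above (an illustrative sketch, not part
    # of the original file): with tokenizer.model_max_length = 1024, a prompt of
    # cur_len = 1000 tokens and max_new_tokens = 100, cur_len + new_tokens = 1100
    # exceeds 1024, so keep_length = 1024 - 100 = 924 and only the last 924 prompt
    # tokens (and matching attention-mask positions) are kept, leaving room for the
    # requested generation.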
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
                # Remove the padding prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 0 |
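# Minimal usage sketch for the text-generation pipeline defined above (illustrative
# only, not part of the original file; the 'gpt2' checkpoint name is an assumption):
#
#     from transformers import pipeline
#
#     generator = pipeline('text-generation', model='gpt2')
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(outputs[0]['generated_text'])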
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ : Tuple = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
    'Minangkabau Arabic': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : str = '''facebook/nllb-200-distilled-600M'''
snake_case__ : Union[str, Any] = (
        '''This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '''
        '''which should be the desired output language. Both `src_lang` and `tgt_lang` are written in plain English, such '''
        '''as \'Romanian\' or \'Albanian\'. It returns the text translated into `tgt_lang`.'''
)
snake_case__ : Optional[Any] = '''translator'''
snake_case__ : Tuple = AutoTokenizer
snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM
snake_case__ : Dict = LANGUAGE_CODES
snake_case__ : str = ['''text''', '''text''', '''text''']
snake_case__ : Tuple = ['''text''']
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""" )
a_ : str = self.lang_to_code[src_lang]
a_ : Any = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
return self.model.generate(**SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
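# Illustrative usage sketch (not part of the original file). Assuming the class above
# plays the role of the library's `TranslationTool`, it is invoked like any `Tool`:
#
#     translator = TranslationTool()
#     print(translator('Bonjour, comment allez-vous ?', src_lang='French', tgt_lang='English'))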
| 32 |
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
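# Quick illustration of the left/right variants above (a sketch, not original code):
# for sorted_collection = [1, 2, 4, 4, 8] and item = 4, `bisect_left` returns 2 (the
# first index holding a 4) while `bisect_right` returns 4 (one past the last 4), so
# `insort_left` inserts before existing equal items and `insort_right` after them.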
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : List[str] = logging.get_logger(__name__)
__A : int = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : int = "deta"
SCREAMING_SNAKE_CASE_ : List[str] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , A : Optional[int]=None , A : Union[str, Any]=9_00 , A : Tuple=20_48 , A : int=6 , A : str=20_48 , A : Any=8 , A : Optional[int]=6 , A : Dict=10_24 , A : str=8 , A : Dict=0.0 , A : Union[str, Any]=True , A : List[Any]="relu" , A : Tuple=2_56 , A : Optional[int]=0.1 , A : int=0.0 , A : str=0.0 , A : List[Any]=0.02 , A : Union[str, Any]=1.0 , A : str=True , A : str=False , A : Optional[int]="sine" , A : Optional[Any]=5 , A : str=4 , A : Union[str, Any]=4 , A : Tuple=True , A : Union[str, Any]=3_00 , A : Optional[Any]=True , A : int=True , A : Dict=1 , A : Tuple=5 , A : Optional[Any]=2 , A : Optional[Any]=1 , A : Any=1 , A : int=5 , A : Optional[Any]=2 , A : List[str]=0.1 , A : Dict=0.25 , **A : Tuple , ) -> Dict:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(A , A ):
lowercase_ : List[str] = backbone_config.pop('''model_type''' )
lowercase_ : List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase_ : Union[str, Any] = config_class.from_dict(A )
lowercase_ : List[str] = backbone_config
lowercase_ : Optional[int] = num_queries
lowercase_ : str = max_position_embeddings
lowercase_ : Any = d_model
lowercase_ : Optional[Any] = encoder_ffn_dim
lowercase_ : List[str] = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : int = decoder_ffn_dim
lowercase_ : List[Any] = decoder_layers
lowercase_ : int = decoder_attention_heads
lowercase_ : Optional[Any] = dropout
lowercase_ : Tuple = attention_dropout
lowercase_ : str = activation_dropout
lowercase_ : List[str] = activation_function
lowercase_ : int = init_std
lowercase_ : Dict = init_xavier_std
lowercase_ : List[Any] = encoder_layerdrop
lowercase_ : str = auxiliary_loss
lowercase_ : Dict = position_embedding_type
# deformable attributes
lowercase_ : Union[str, Any] = num_feature_levels
lowercase_ : Optional[int] = encoder_n_points
lowercase_ : Dict = decoder_n_points
lowercase_ : Tuple = two_stage
lowercase_ : Union[str, Any] = two_stage_num_proposals
lowercase_ : Tuple = with_box_refine
lowercase_ : Optional[int] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase_ : Optional[Any] = class_cost
lowercase_ : Dict = bbox_cost
lowercase_ : Optional[int] = giou_cost
# Loss coefficients
lowercase_ : Optional[int] = mask_loss_coefficient
lowercase_ : Optional[Any] = dice_loss_coefficient
lowercase_ : Dict = bbox_loss_coefficient
lowercase_ : int = giou_loss_coefficient
lowercase_ : Union[str, Any] = eos_coefficient
lowercase_ : Dict = focal_alpha
super().__init__(is_encoder_decoder=A , **A )
@property
def A ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def A ( self : Optional[int] ) -> int:
return self.d_model
def A ( self : List[Any] ) -> Dict:
lowercase_ : str = copy.deepcopy(self.__dict__ )
lowercase_ : Union[str, Any] = self.backbone_config.to_dict()
lowercase_ : List[Any] = self.__class__.model_type
return output
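    # Illustrative construction sketch (not part of the original file). Assuming this
    # class corresponds to the library's `DetaConfig`, it can be built with defaults or
    # with explicit overrides, e.g.
    #
    #     config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
    #
    # Note the guard in `__init__` above: `two_stage=True` requires `with_box_refine=True`.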
| 33 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 0 |
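# The tests above exercise cross-framework checkpoint loading: `from_pt=True` loads
# PyTorch weights into a TF model and `from_tf=True` does the reverse, with the
# parameter count (14410) asserted to survive the round trip. Illustrative standalone
# sketch (the checkpoint name is an assumption):
#
#     from transformers import TFAutoModel
#     model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)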
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A =get_logger(__name__)
class _a ( enum.Enum ):
__a : Union[str, Any] = """all_checks"""
__a : Tuple = """basic_checks"""
__a : Tuple = """no_checks"""
class _a ( __a ):
pass
class _a ( __a ):
pass
class _a ( __a ):
pass
class _a ( __a ):
pass
def snake_case_ (_a : Optional[dict] , _a : dict , _a : Any=None ):
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(_a ) - set(_a ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_a ) - set(_a ) ) )
if len(set(_a ) - set(_a ) ) > 0:
raise UnexpectedDownloadedFile(str(set(_a ) - set(_a ) ) )
UpperCAmelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCAmelCase = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(_a ) > 0:
raise NonMatchingChecksumError(
F"Checksums didn't match{for_verification_name}:\n"
F"{bad_urls}\n"
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class _a ( __a ):
pass
class _a ( __a ):
pass
class _a ( __a ):
pass
class _a ( __a ):
pass
def snake_case_ (_a : Optional[dict] , _a : dict ):
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(_a ) - set(_a ) ) > 0:
raise ExpectedMoreSplits(str(set(_a ) - set(_a ) ) )
if len(set(_a ) - set(_a ) ) > 0:
raise UnexpectedSplits(str(set(_a ) - set(_a ) ) )
UpperCAmelCase = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_a ) > 0:
raise NonMatchingSplitsSizesError(str(_a ) )
logger.info('''All the splits matched successfully.''' )
def snake_case_ (_a : str , _a : bool = True ):
if record_checksum:
UpperCAmelCase = shaaaa()
with open(_a , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , b'''''' ):
m.update(_a )
UpperCAmelCase = m.hexdigest()
else:
UpperCAmelCase = None
return {"num_bytes": os.path.getsize(_a ), "checksum": checksum}
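# Standalone sketch of the hashing loop above (illustrative; the path is hypothetical
# and `shaaaa` is assumed to stand for `hashlib.sha256`). The file is read in 1 MiB
# chunks (1 << 20 bytes), so even large downloads are hashed without being loaded
# fully into memory:
#
#     import hashlib
#     m = hashlib.sha256()
#     with open('/tmp/example.bin', 'rb') as f:
#         for chunk in iter(lambda: f.read(1 << 20), b''):
#             m.update(chunk)
#     print(m.hexdigest())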
def snake_case_ (_a : Optional[int] ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 34 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
        >>> image[0].save("cat.png")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
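# Worked example for the helper above (illustrative): with h = w = 768 and
# scale_factor = 8, 768 // 64 = 12 with no remainder, so the function returns
# (96, 96) -- the latent height/width that the MoVQ decoder later upsamples back to
# 768x768. A 770x770 request would round up to (104, 104) instead.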
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
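                # Classifier-free guidance (descriptive note, not in the original file):
                # push the prediction away from the unconditional estimate toward the
                # text-conditioned one, scaled by `guidance_scale`.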
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = tempfile.mkdtemp()
snake_case__ : Tuple = 5
        # Realm tokenizer
snake_case__ : Union[str, Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
snake_case__ : Union[str, Any] = os.path.join(snake_case_ , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
snake_case__ : List[Any] = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
def lowerCamelCase ( self : List[str] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def lowerCamelCase ( self : str ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self : int ):
snake_case__ : List[Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCamelCase ( self : int ):
snake_case__ : str = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def lowerCamelCase ( self : List[str] ):
snake_case__ : str = np.array(
[
b"""This is the first record""",
b"""This is the second record""",
b"""This is the third record""",
b"""This is the fourth record""",
b"""This is the fifth record""",
b"""This is a longer longer longer record""",
] , dtype=snake_case_ , )
return block_records
def lowerCamelCase ( self : int ):
snake_case__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCamelCase ( self : int ):
snake_case__ : Tuple = self.get_config()
snake_case__ : Any = self.get_dummy_retriever()
snake_case__ : str = retriever.tokenizer
snake_case__ : Optional[int] = np.array([0, 3] , dtype="""long""" )
snake_case__ : List[Any] = tokenizer(["""Test question"""] ).input_ids
snake_case__ : List[str] = tokenizer(
["""the fourth"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
snake_case__ : List[Any] = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = self.get_config()
snake_case__ : List[str] = self.get_dummy_retriever()
snake_case__ : str = retriever.tokenizer
snake_case__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" )
snake_case__ : Union[str, Any] = tokenizer(["""Test question"""] ).input_ids
snake_case__ : Optional[int] = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
snake_case__ : Any = config.reader_seq_len
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
self.assertEqual([False, True, True] , snake_case_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ : List[str] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
snake_case__ : Tuple = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
snake_case__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
| 35 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
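# A minimal usage sketch for the tokenizer exercised by the integration test
# above. Assumes `transformers` with sentencepiece support is installed and
# the Hugging Face hub is reachable; the checkpoint name is taken from the
# test itself.
from transformers import AutoTokenizer

barthez_tokenizer = AutoTokenizer.from_pretrained('moussaKam/mbarthez')
barthez_batch = barthez_tokenizer(
    ["Le transformeur est un modèle d'apprentissage profond."],
    padding=True,
    return_tensors='pt',
)
print(barthez_batch.input_ids.shape)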
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase_ ( a):
def __init__( self, dataset, process, params):
'''simple docstring'''
_lowerCAmelCase : List[str] = dataset
_lowerCAmelCase : str = process
_lowerCAmelCase : Optional[Any] = params
def __len__( self):
'''simple docstring'''
return len(self.dataset)
def __getitem__( self, i):
'''simple docstring'''
_lowerCAmelCase : Dict = self.dataset[i]
_lowerCAmelCase : Tuple = self.process(__a, **self.params)
return processed
class UpperCAmelCase_ ( a):
def __init__( self, loader, infer, params, loader_batch_size=None):
'''simple docstring'''
_lowerCAmelCase : str = loader
_lowerCAmelCase : List[Any] = infer
_lowerCAmelCase : Optional[int] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = loader_batch_size
# Internal bookkeeping
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[str] = None
def __len__( self):
'''simple docstring'''
return len(self.loader)
def __iter__( self):
'''simple docstring'''
_lowerCAmelCase : str = iter(self.loader)
return self
def snake_case__ ( self):
'''simple docstring'''
if isinstance(self._loader_batch_data, torch.Tensor):
# Batch data is a simple tensor, just fetch the slice
_lowerCAmelCase : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(element, ModelOutput):
# Convert ModelOutput to tuple first
_lowerCAmelCase : Optional[int] = element.to_tuple()
if isinstance(element[0], torch.Tensor):
_lowerCAmelCase : Dict = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0], np.ndarray):
_lowerCAmelCase : int = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__a, __a):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0], torch.Tensor):
_lowerCAmelCase : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0], np.ndarray):
_lowerCAmelCase : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase : str = None
elif isinstance(element[self._loader_batch_index], torch.Tensor):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase : Tuple = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index], np.ndarray):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase : Tuple = np.expand_dims(element[self._loader_batch_index], 0)
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase : Optional[int] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowerCAmelCase : str = self._loader_batch_data.__class__(__a)
self._loader_batch_index += 1
return result
def snake_case__ ( self):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase : Union[str, Any] = next(self.iterator)
_lowerCAmelCase : Optional[int] = self.infer(__a, **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__a, torch.Tensor):
_lowerCAmelCase : Any = processed
else:
_lowerCAmelCase : List[Any] = list(processed.keys())[0]
_lowerCAmelCase : List[Any] = processed[key]
if isinstance(first_tensor, list):
_lowerCAmelCase : Optional[int] = len(first_tensor)
else:
_lowerCAmelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase : Any = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase : Tuple = processed
_lowerCAmelCase : Tuple = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
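# A tiny, self-contained illustration of the unbatching trick used in
# `loader_batch_item` above: slice one row out of a batched tensor, then
# restore a leading batch dimension of 1 so downstream code still sees
# batch_size=1.
import torch

_batch = torch.zeros(4, 7)
_item = _batch[2].unsqueeze(0)
assert _item.shape == (1, 7)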
class UpperCAmelCase_ ( a):
def __init__( self, loader, infer, params, loader_batch_size=None):
'''simple docstring'''
super().__init__(loader, infer, params)
def __iter__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = iter(self.loader)
_lowerCAmelCase : Dict = None
return self
def snake_case__ ( self):
'''simple docstring'''
if self.subiterator is None:
_lowerCAmelCase : int = self.infer(next(self.iterator), **self.params)
try:
# Try to return next item
_lowerCAmelCase : Union[str, Any] = next(self.subiterator)
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_lowerCAmelCase : List[str] = self.infer(next(self.iterator), **self.params)
_lowerCAmelCase : Optional[Any] = next(self.subiterator)
return processed
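# A plain-Python sketch of the "flattening lists of lists with generators"
# idea described in the comments above: each outer item yields a
# sub-iterator, and elements surface one at a time without materialising
# the full flattened list.
def _flatten(outer):
    for inner in outer:
        yield from inner

assert list(_flatten([[1, 2], [3], [4, 5]])) == [1, 2, 3, 4, 5]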
class UpperCAmelCase_ ( a):
def __iter__( self):
'''simple docstring'''
_lowerCAmelCase : Any = iter(self.loader)
return self
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Dict = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase : Union[str, Any] = self.loader_batch_item()
_lowerCAmelCase : int = item.pop("is_last")
accumulator.append(__a)
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase : Dict = self.infer(next(self.iterator), **self.params)
if self.loader_batch_size is not None:
if isinstance(__a, torch.Tensor):
_lowerCAmelCase : List[str] = processed
else:
_lowerCAmelCase : Any = list(processed.keys())[0]
_lowerCAmelCase : Dict = processed[key]
if isinstance(first_tensor, list):
_lowerCAmelCase : Tuple = len(first_tensor)
else:
_lowerCAmelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase : List[Any] = observed_batch_size
_lowerCAmelCase : Optional[int] = processed
_lowerCAmelCase : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase : Union[str, Any] = self.loader_batch_item()
_lowerCAmelCase : Optional[int] = item.pop("is_last")
accumulator.append(__a)
if is_last:
return accumulator
else:
_lowerCAmelCase : Optional[int] = processed
_lowerCAmelCase : List[str] = item.pop("is_last")
accumulator.append(__a)
return accumulator
class UpperCAmelCase_ ( a):
def __init__( self, dataset, key):
'''simple docstring'''
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[int] = key
def __len__( self):
'''simple docstring'''
return len(self.dataset)
def __getitem__( self, i):
'''simple docstring'''
return self.dataset[i][self.key]
class UpperCAmelCase_ ( a):
def __init__( self, dataset, keya, keyb):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = dataset
_lowerCAmelCase : Any = keya
_lowerCAmelCase : List[Any] = keyb
def __len__( self):
'''simple docstring'''
return len(self.dataset)
def __getitem__( self, i):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 1_28, 3_20, 5_12] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**kwargs )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3
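# A short sketch, assuming a `transformers` release that ships PoolFormer:
# instantiating the configuration above and reading back a stored field.
from transformers import PoolFormerConfig

_config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
assert _config.model_type == "poolformer"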
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,**__UpperCAmelCase ) -> Tuple:
super().__init__(**__UpperCAmelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[Any] = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ : int = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowerCAmelCase__ : Optional[int] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase_ ( self ,image ,candidate_labels=None ,hypothesis_template="This is a photo of {}." ) -> int:
lowerCAmelCase__ : str = load_image(image )
lowerCAmelCase__ : Dict = self.image_processor(images=[image] ,return_tensors=self.framework )
lowerCAmelCase__ : List[Any] = candidate_labels
lowerCAmelCase__ : List[str] = [hypothesis_template.format(x ) for x in candidate_labels]
lowerCAmelCase__ : Optional[Any] = self.tokenizer(__UpperCAmelCase ,return_tensors=self.framework ,padding=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = [text_inputs]
return inputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = model_inputs.pop("""candidate_labels""" )
lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,UserDict ):
lowerCAmelCase__ : int = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ : Dict = text_inputs[0][0]
lowerCAmelCase__ : Any = self.model(**__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Union[str, Any] = model_outputs.pop("""candidate_labels""" )
lowerCAmelCase__ : List[str] = model_outputs["""logits"""][0]
if self.framework == "pt":
lowerCAmelCase__ : List[str] = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ : Optional[Any] = probs.tolist()
if not isinstance(scores ,list ):
lowerCAmelCase__ : Dict = [scores]
elif self.framework == "tf":
lowerCAmelCase__ : Any = stable_softmax(__UpperCAmelCase ,axis=-1 )
lowerCAmelCase__ : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCAmelCase__ : Tuple = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x : -x[0] )
]
return result
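# Illustrative only: the same score/label contract as the pipeline above,
# reached through the high-level factory. The CLIP checkpoint and image URL
# below are placeholders, not values prescribed by this module.
from transformers import pipeline

_classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
_preds = _classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["animals", "vehicles"],
)
# _preds is a list of {"score": float, "label": str} dicts, sorted by descending score.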
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _SCREAMING_SNAKE_CASE :
snake_case__ : List[str] = XGLMConfig
snake_case__ : Optional[Any] = {}
snake_case__ : Optional[int] = """gelu"""
def __init__( self : Union[str, Any] , parent : str , batch_size : Optional[Any]=14 , seq_length : Union[str, Any]=7 , is_training : List[Any]=True , use_input_mask : Any=True , use_labels : int=True , vocab_size : Any=99 , d_model : Union[str, Any]=32 , num_hidden_layers : List[Any]=2 , num_attention_heads : Optional[Any]=4 , ffn_dim : Optional[Any]=37 , activation_function : int="gelu" , activation_dropout : int=0.1 , attention_dropout : Any=0.1 , max_position_embeddings : List[str]=512 , initializer_range : List[Any]=0.02 , ):
UpperCamelCase :Tuple = parent
UpperCamelCase :Optional[Any] = batch_size
UpperCamelCase :List[Any] = seq_length
UpperCamelCase :Union[str, Any] = is_training
UpperCamelCase :Tuple = use_input_mask
UpperCamelCase :List[str] = use_labels
UpperCamelCase :Union[str, Any] = vocab_size
UpperCamelCase :Tuple = d_model
UpperCamelCase :Any = num_hidden_layers
UpperCamelCase :Union[str, Any] = num_attention_heads
UpperCamelCase :Optional[Any] = ffn_dim
UpperCamelCase :List[Any] = activation_function
UpperCamelCase :str = activation_dropout
UpperCamelCase :List[str] = attention_dropout
UpperCamelCase :List[str] = max_position_embeddings
UpperCamelCase :Dict = initializer_range
UpperCamelCase :List[Any] = None
UpperCamelCase :str = 0
UpperCamelCase :Any = 2
UpperCamelCase :Any = 1
def _A ( self : int ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _A ( self : List[str] ):
UpperCamelCase :Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase :List[str] = None
if self.use_input_mask:
UpperCamelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :str = self.get_config()
UpperCamelCase :List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _A ( self : str ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowerCamelCase , )
def _A ( self : Union[str, Any] ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, head_mask) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
snake_case__ : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
snake_case__ : Dict = (TFXGLMForCausalLM,) if is_tf_available() else ()
snake_case__ : Optional[Any] = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : List[str] = False
def _A ( self : List[Any] ):
UpperCamelCase :Optional[Any] = TFXGLMModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def _A ( self : int ):
self.config_tester.run_common_tests()
@slow
def _A ( self : int ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :str = TFXGLMModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _A ( self : Dict ):
super().test_resize_token_embeddings()
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _A ( self : Optional[Any] , verify_outputs : Optional[int]=True ):
UpperCamelCase :Dict = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :str = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase :int = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase :str = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
@slow
def _A ( self : Tuple ):
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
UpperCamelCase :Optional[int] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
UpperCamelCase :str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
UpperCamelCase :Any = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] )
UpperCamelCase :str = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def _A ( self : int ):
UpperCamelCase :List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :str = """left"""
# use different length sentences to test batching
UpperCamelCase :Union[str, Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
UpperCamelCase :str = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase )
UpperCamelCase :Optional[int] = inputs["""input_ids"""]
UpperCamelCase :Dict = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
UpperCamelCase :List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
UpperCamelCase :List[Any] = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
UpperCamelCase :str = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :Any = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
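# A hedged sketch of the left-padding setup the batching test above depends
# on: decoder-only models must be padded on the left so that generation
# continues from real tokens rather than pad tokens. The checkpoint name is
# the one used in the test; TensorFlow and sentencepiece are assumed.
from transformers import XGLMTokenizer

_tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
_tok.padding_side = "left"
_enc = _tok(["a much longer first example sentence", "short"], padding=True, return_tensors="tf")
# _enc.attention_mask marks the left pads with 0, so generate() starts from real tokens.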
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImg2ImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''MobileViTFeatureExtractor''']
_a = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
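# A rough, self-contained sketch of what the `_LazyModule` indirection above
# buys: heavy submodule imports are deferred until an attribute is first
# accessed. This illustrates the pattern only; it is not the actual
# `transformers` implementation.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")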
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)