code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 409 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = 1
lowercase = FrozenDict(_lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = True
lowercase = FrozenDict(_lowerCAmelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowerCAmelCase , segmentation_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def _a ( self , _lowerCAmelCase = "auto" ) -> Optional[int]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
self.enable_attention_slicing(_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowercase = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 50 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = 1 , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
lowercase = self.segmentation_model(**_lowerCAmelCase )
lowercase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase = self.numpy_to_pil(_lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , height=_lowerCAmelCase , width=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , output_type=_lowerCAmelCase , return_dict=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=_lowerCAmelCase , )
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
snake_case : Any = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
snake_case : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
snake_case : Optional[Any] = dict(zip(vocab, range(len(vocab))))
snake_case : int = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Optional[int] = Path(tmpdirname)
snake_case : str = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
snake_case : Union[str, Any] = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
snake_case : int = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
snake_case : Optional[int] = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
snake_case : Dict = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
snake_case : Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
snake_case : Any = tokenizer(['Making tiny model'], return_tensors='pt')
snake_case : int = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 566 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : str = "git_vision_model"
def __init__( self , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=1_6 , __UpperCAmelCase="quick_gelu" , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = num_channels
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
@classmethod
def __magic_name__ ( cls , __UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCAmelCase )
__lowercase , __lowercase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
__lowercase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : Any = "git"
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=3_0_5_2_2 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=6 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3_0_7_2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1_0_1 , __UpperCAmelCase=1_0_2 , __UpperCAmelCase=None , **__UpperCAmelCase , ):
"""simple docstring"""
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
if vision_config is None:
__lowercase = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
__lowercase = GitVisionConfig(**__UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = tie_word_embeddings
__lowercase = num_image_with_embedding
__lowercase = bos_token_id
__lowercase = eos_token_id
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 566 | 1 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =graph
self._normalize_graph(__A , __A )
_lowerCAmelCase =len(__A )
_lowerCAmelCase =None
def UpperCamelCase__ ( self , __A , __A ) -> Optional[int]:
if sources is int:
_lowerCAmelCase =[sources]
if sinks is int:
_lowerCAmelCase =[sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
_lowerCAmelCase =sources[0]
_lowerCAmelCase =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
_lowerCAmelCase =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_lowerCAmelCase =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_lowerCAmelCase =max_input_flow
_lowerCAmelCase =0
_lowerCAmelCase =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_lowerCAmelCase =max_input_flow
_lowerCAmelCase =size - 1
def UpperCamelCase__ ( self ) -> Optional[Any]:
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCamelCase__ ( self , __A ) -> Dict:
_lowerCAmelCase =algorithm(self )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> Dict:
_lowerCAmelCase =flow_network
_lowerCAmelCase =flow_network.verticesCount
_lowerCAmelCase =flow_network.sourceIndex
_lowerCAmelCase =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_lowerCAmelCase =flow_network.graph
_lowerCAmelCase =False
def UpperCamelCase__ ( self ) -> List[str]:
if not self.executed:
self._algorithm()
_lowerCAmelCase =True
def UpperCamelCase__ ( self ) -> Tuple:
pass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A ) -> List[Any]:
super().__init__(__A )
# use this to save your result
_lowerCAmelCase =-1
def UpperCamelCase__ ( self ) -> Dict:
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A ) -> List[str]:
super().__init__(__A )
_lowerCAmelCase =[[0] * self.verticies_count for i in range(self.verticies_count )]
_lowerCAmelCase =[0] * self.verticies_count
_lowerCAmelCase =[0] * self.verticies_count
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_lowerCAmelCase =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_lowerCAmelCase =0
while i < len(__A ):
_lowerCAmelCase =vertices_list[i]
_lowerCAmelCase =self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
_lowerCAmelCase =0
else:
i += 1
_lowerCAmelCase =sum(self.preflow[self.source_index] )
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def UpperCamelCase__ ( self , __A , __A ) -> List[Any]:
_lowerCAmelCase =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
_lowerCAmelCase =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_lowerCAmelCase =self.heights[to_index]
if min_height is not None:
_lowerCAmelCase =min_height + 1
if __name__ == "__main__":
lowercase_ = [0]
lowercase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase_ = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
| 58 | '''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 177 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
lowercase : List[str] = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
lowercase : int = {
'''vinai/phobert-base''': 2_56,
'''vinai/phobert-large''': 2_56,
}
def lowerCAmelCase__ ( _a : List[str] ):
snake_case_ : str = set()
snake_case_ : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : List[Any] = char
snake_case_ : Any = set(_a )
return pairs
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : str = VOCAB_FILES_NAMES
A : List[str] = PRETRAINED_VOCAB_FILES_MAP
A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = vocab_file
snake_case_ : Any = merges_file
snake_case_ : Any = {}
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 1
snake_case_ : Optional[int] = 2
snake_case_ : Optional[int] = 3
self.add_from_file(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = {v: k for k, v in self.encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
snake_case_ : List[Any] = merges_handle.read().split("\n" )[:-1]
snake_case_ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
snake_case_ : List[str] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case_ : Optional[Any] = {}
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
snake_case_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Dict:
return len(self.encoder )
def _lowerCAmelCase ( self ) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
snake_case_ : List[Any] = tuple(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
snake_case_ : str = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
snake_case_ : List[str] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : Dict = bigram
snake_case_ : Any = []
snake_case_ : Any = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
snake_case_ : List[str] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : Optional[Any] = j
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Union[str, Any] = tuple(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
snake_case_ : List[Any] = get_pairs(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = "@@ ".join(_SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = word[:-4]
snake_case_ : Union[str, Any] = word
return word
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ : List[Any] = []
snake_case_ : str = re.findall(r"\S+\n?" , _SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(" " ) ) )
return split_tokens
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : Any = " ".join(_SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
return out_string
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , _SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
try:
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
snake_case_ : Dict = f.readlines()
for lineTmp in lines:
snake_case_ : Tuple = lineTmp.strip()
snake_case_ : Dict = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
snake_case_ : Optional[Any] = line[:idx]
snake_case_ : List[Any] = len(self.encoder )
| 568 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps each submodule name to the public names it provides.
# (Restored: the dict and the model list were previously bound to one placeholder
# name while _LazyModule below referenced `_import_structure`, a NameError.)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only registered when torch is installed.
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True iff *number* is a perfect square.

    Named ``is_sq`` to match the call sites in ``solution`` below (the
    placeholder name previously collided with its sibling helpers).
    """
    root = int(sqrt(number))
    return number == root * root
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x_num/x_den + y_num/y_den + z_num/z_den as a reduced
    (numerator, denominator) pair.

    Named ``add_three`` to match the call sites in ``solution`` below.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    # Reduce to lowest terms before returning.
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Sum every distinct reduced fraction z (0 < z_num < z_den <= order) that
    satisfies x^n + y^n = z^n for some fractions x, y with numerator < denominator
    <= order and n in {1, 2, -1, -2}; return numerator + denominator of the total.

    Named ``solution`` to match the ``solution()`` call in the main guard (the
    placeholder name previously collided with the helper functions above).
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1: z = x + y
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2: z^2 = x^2 + y^2, only valid when both sides are perfect squares
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1: 1/z = 1/x + 1/y
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2: 1/z^2 = 1/x^2 + 1/y^2, only valid for perfect squares
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"""{solution() = }""")
from heapq import heappop, heappush
import numpy as np
def lowerCamelCase__ ( snake_case_ : np.ndarray , snake_case_ : tuple[int, int] , snake_case_ : tuple[int, int] , snake_case_ : bool , ) -> tuple[float | int, list[tuple[int, int]]]:
__snake_case = grid.shape
__snake_case = [-1, 1, 0, 0]
__snake_case = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__snake_case = [(0, source)], set()
__snake_case = np.full((rows, cols) , np.inf )
__snake_case = 0
__snake_case = np.empty((rows, cols) , dtype=_lowerCAmelCase )
__snake_case = None
while queue:
(__snake_case) = heappop(_lowerCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__snake_case = []
while (x, y) != source:
path.append((x, y) )
__snake_case = predecessors[x, y]
path.append(_lowerCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowerCAmelCase ) ):
__snake_case = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__snake_case = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowerCAmelCase , (dist + 1, (nx, ny)) )
__snake_case = dist + 1
__snake_case = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first *n_element* Hamming numbers (of the form 2^i * 3^j * 5^k).

    Raises ValueError when n_element < 1. Named ``hamming`` to match the call
    in the main guard; the body's locals were previously unbound placeholders.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each factor pointer past candidates already emitted.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 127 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: maps each submodule name to the public names it provides.
# (Restored: the dict and model list were bound to one placeholder name while
# _LazyModule referenced `_import_structure`; the TYPE_CHECKING imports also
# used mangled `mam_aaa`/`MaMaaa` names contradicting the declared structure.)
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only registered when torch is installed.
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 401 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class snake_case(ProcessorMixin):
    """SpeechT5 processor: wraps a SpeechT5 feature extractor and tokenizer into
    one object. Inputs (audio/text) are processed by one component, targets
    (audio_target/text_target) by the other, and target labels/attention masks
    are merged into the returned inputs.

    Restored from an obfuscated version whose base class and locals were
    undefined and whose ``pad``/``batch_decode``/``decode`` methods collided
    under a single name (ProcessorMixin expects these attribute/method names).
    """

    # ProcessorMixin contract: class names of the wrapped components.
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process an `audio` or `text` input and/or an `audio_target` or
        `text_target`; returns the processed inputs with `labels` and
        `decoder_attention_mask` merged in when targets are given.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_values` or `input_ids` inputs and/or `labels` targets,
        delegating to the matching component's ``pad``.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            # Token-style labels go through the tokenizer; spectrogram-style
            # labels go through the feature extractor.
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                # Temporarily switch the feature size so mel-bin targets pad correctly.
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
| 401 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
# Grayscale version shared by the filter tests below.
# (Both module-level arrays were previously bound to one placeholder name,
# and every test function collided under one name; restored to the `img`/`gray`
# names the bodies use and to pytest-discoverable `test_*` names.)
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    # np.uint8 replaces the mangled `uinta` import name.
    res = conv.img_convolve(gray, laplace).astype(np.uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            # NOTE(review): writing into lbp_image[i][j] is assumed from the
            # trailing `lbp_image.any()` assertion — confirm against upstream.
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 427 | '''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
# Restored names: all three docstring constants were bound to one placeholder
# while the decorator/class referenced _DESCRIPTION/_KWARGS_DESCRIPTION/_CITATION,
# and both methods collided under one name (datasets.Metric requires _info/_compute).
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction.  The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results[\'matthews_correlation\'], 2))
        0.54
    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results[\'matthews_correlation\'], 2))
        0.1
    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                     predictions=[1, 2, 2, 0, 3, 3],
        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results[\'matthews_correlation\'], 2))
        -0.25
'''

_CITATION = '''\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):
    """Metric wrapper around sklearn's ``matthews_corrcoef``."""

    def _info(self):
        # datasets.Metric hook: declares the metric's feature schema and metadata.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        # sklearn's matthews_corrcoef expects (y_true, y_pred) order.
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 427 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Restored names: the functions below reference `logger` and `MAPPING`, which
# were previously bound to a single placeholder name.
logger = logging.get_logger(__name__)

# Maps fairseq state-dict key fragments to HF Hubert attribute paths
# ("*" is replaced by the layer index at load time).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the dot-separated *key* through *hf_pointer* and copy *value* into
    the tensor selected by *weight_type* (weight/weight_g/weight_v/bias, or the
    pointer itself when None), asserting the shapes match.

    Named per its call site in ``recursively_load_weights``; the previous
    definition used placeholder parameter names the body never referenced.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor from the fairseq model's state dict into the matching
    HF Hubert module, collecting (and warning about) weights with no mapping.

    Named per its call site in ``convert_hubert_checkpoint``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned checkpoints wrap the base model under `.hubert`.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the transformer layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor (conv weight/bias or
    layer-norm weight/bias) into the HF feature extractor, by layer id and
    type id parsed from the fairseq name.

    Named per its call site in ``recursively_load_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the (group/layer) norm; only layer 0 carries it in group-norm mode.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Hubert checkpoint into the HF Transformers layout and
    save model (and processor, when fine-tuned with a dict) to
    *pytorch_dump_folder_path*.

    Named per its call site in the ``__main__`` guard; parameter names restored
    from the argparse wiring there.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the parser/args locals were previously bound to a
    # placeholder name while the code below referenced `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 712 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule name to the public names it provides.
# (Restored: each list was previously bound to one placeholder name, overwriting
# the previous one, while _LazyModule referenced `_import_structure`.)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 555 | 0 |
# Restored name: both functions reference UNIVERSAL_GAS_CONSTANT, which was
# previously bound to a placeholder name.
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V.

    Raises ValueError when any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P.

    Raises ValueError when any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 669 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case = 16
__snake_case = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE MRPC train/eval dataloaders tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` coordinating (distributed) preprocessing.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    # NOTE(review): the upstream example evaluates with a separate EVAL_BATCH_SIZE (32); the
    # obfuscated original lost that reference, so the training batch size is reused here — confirm intent.
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=batch_size,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate bert-base-cased on GLUE MRPC with 🤗 Accelerate.

    Args:
        config: dict with keys "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace providing `cpu` and `mixed_precision`.
    """
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI flags and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public names it provides.
# NOTE(review): the obfuscated original rebound `_snake_case` with plain lists (clobbering
# the dict) and then referenced an undefined `_import_structure`; restored to the standard
# transformers lazy-module pattern.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_snake_case = None
_snake_case = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_snake_case = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class _lowerCAmelCase:
    """Image feature: encodes/decodes image data to/from an Arrow struct {bytes, path}.

    NOTE(review): the obfuscated original collapsed all attribute and method names into
    single identifiers; the names below are restored from what the method bodies actually
    reference (`self.decode`, `self.pa_type`, ...).
    """

    # Whether decode_example returns a PIL image (True) or the raw {bytes, path} dict (False).
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode `value` into a {bytes, path} dict suitable for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a {bytes, path} dict into a PIL image, fetching remote files when needed.

        Args:
            value: dict with "path" and "bytes" entries (either may be None).
            token_per_repo_id: optional mapping repo_id -> auth token for Hub-hosted files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # strip any chained-URL protocol prefixes to recover the real source URL
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        """When decoding is disabled, expose the underlying binary/string columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string/binary/struct/list Arrow storage to the {bytes, path} struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # nested lists are treated as raw pixel arrays and re-encoded as image bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed referenced files' bytes into storage, keeping only file basenames as paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return the image formats Pillow can both open and save, caching the result.

    Fixes the obfuscated original, which declared `global _IMAGE_COMPRESSION_FORMATS`
    but assigned the computed list to a throwaway local, so the cache was never filled
    and `None` was returned on first call.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, keeping its format when Pillow can re-encode it.

    Falls back to PNG for simple modes and TIFF otherwise.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        image_format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=image_format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as a {path, bytes} dict, preferring a local path when known."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype if needed.

    Raises:
        TypeError: when the array dtype cannot be mapped to any dtype Pillow accepts.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            # Only commit to the candidate once it is known valid; the original
            # overwrote dest_dtype unconditionally, which made the "no valid dtype"
            # TypeError below unreachable.
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def __lowerCamelCase(objs: List[Any]) -> List[dict]:
    """Encode a list of image-like objects (paths, arrays, PIL images) into {path, bytes} dicts.

    Non-image lists are returned unchanged. NOTE(review): upstream names this
    `objects_to_list_of_image_dicts`; the obfuscated name is kept because it is the
    module's final surviving binding for `__lowerCamelCase`.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 170 | 1 |
'''simple docstring'''
# Fifth power of each decimal digit, keyed by the digit character
# (the obfuscated original bound this to `__snake_case`, leaving the
# name referenced below undefined; the stray `: int` annotation is dropped).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of `number`.

    Computed directly from the digits so the function is self-contained
    (the obfuscated original referenced an undefined lookup table).
    """
    return sum(int(digit) ** 5 for digit in str(number))
def solution(limit: int = 100_0000) -> int:
    """Project Euler 30: sum of all numbers in [1000, limit) equal to the sum of
    the fifth powers of their digits.

    Args:
        limit: exclusive upper bound of the search range (default 1,000,000,
            which generalizes the original hard-coded constant).
    """
    # local table of digit fifth powers, so the function stands alone
    digit_powers = {str(digit): digit**5 for digit in range(10)}

    def fifth_power_digit_sum(number: int) -> int:
        return sum(digit_powers[digit] for digit in str(number))

    return sum(
        number
        for number in range(1000, limit)
        if number == fifth_power_digit_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 577 |
def solution(n: int = 1_0_0_0) -> int:
    """Project Euler 57: count expansions of the continued fraction for sqrt(2)
    (3/2, 7/5, 17/12, ...) among the first `n` whose numerator has more digits
    than its denominator.

    The obfuscated original compared a value with itself (always False) and so
    always returned 0; restored to compare numerator vs denominator digit counts.
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(F'{solution() = }')
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Pick a random element of `lst` to serve as a quickselect pivot.

    Renamed from the obfuscated `__snake_case` to match the call site in
    `kth_number` below.
    """
    return choice(lst)
def kth_number(lst, k):
    """Return the k-th smallest element (1-indexed) of `lst` via randomized quickselect.

    Assumes the elements are distinct — duplicates of the pivot are discarded by
    the partition below. Expected linear time.
    """
    pivot = choice(lst)  # inlined pivot choice so the function stands alone

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
# Movement deltas used by the grid search: left, down, right, up.
# (The obfuscated original bound this to `__lowerCAmelCase`, leaving the
# `DIRECTIONS` name referenced by `search` undefined.)
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    """A*-style grid search returning (path, action).

    Args:
        grid: 2D list, 0 = free cell, 1 = obstacle.
        init: [row, col] start cell.
        goal: [row, col] goal cell.
        cost: uniform cost per move.
        heuristic: 2D list of per-cell heuristic values.

    Returns:
        path: list of [row, col] cells from init to goal.
        action: 2D grid of direction indices used to reach each cell.

    Raises:
        ValueError: when the goal is unreachable.
    """
    # Movement deltas: left, down, right, up (mirrors the module-level DIRECTIONS table;
    # kept local so the function is self-contained).
    directions = [[-1, 0], [0, -1], [1, 0], [0, 1]]

    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(directions)):  # to try out different valid actions
                    x2 = x + directions[i][0]
                    y2 = y + directions[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - directions[action[x][y]][0]
        y2 = y - directions[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Demo: run the grid search on a small maze and print the action map and path.
    # (The obfuscated original bound every name below to `__lowerCAmelCase`,
    # leaving grid/init/goal/cost/heuristic/path/action undefined.)
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 158 | 1 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class A__(AbstractDatasetReader):
    """Dataset reader that builds a 🤗 dataset from a PySpark DataFrame.

    NOTE(review): the obfuscated original inherited from an undefined `UpperCamelCase`;
    `AbstractDatasetReader` (imported above) is the intended base, and the duplicated
    `lowerCAmelCase__` parameter names (a SyntaxError) are restored from the values
    the body reads.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or fully prepared, per `streaming`)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module-level logger for this configuration file.
__a = logging.get_logger(__name__)

# NOTE(review): this second assignment rebinds the same obfuscated name `__a`,
# clobbering the logger above. Upstream keeps two distinct names (the logger and
# DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP) — confirm nothing relies on `__a`
# still being the logger after this point.
__a = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class A__(PretrainedConfig):
    """Configuration class for DeBERTa-v2 models.

    NOTE(review): the obfuscated original inherited from an undefined `UpperCamelCase`
    (`PretrainedConfig` is the import actually present above) and declared every
    `__init__` parameter with the same name (a SyntaxError). The parameter names and
    defaults below are restored from the attribute assignments in the body.
    """

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=12_8100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept "p2c|c2p"-style strings as well as lists.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size defaults to hidden_size when not explicitly provided
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class A__(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the obfuscated original inherited from an undefined `UpperCamelCase`
    (`OnnxConfig` is the import actually present above) and gave all three methods the
    same name; the names below are restored from the OnnxConfig interface.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for ONNX export."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset version supported by this export."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs, dropping token_type_ids when the config has no token types."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which seq2seq generator to use ("bart" loads the pretrained ELI5 BART checkpoint).
MODEL_TYPE = "bart"
# Whether to load the dense FAISS retrieval index (requires a GPU).
# NOTE: the obfuscated original bound both values to `A__`; these are the names
# the functions below actually read.
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-retrieval (retribert) and answer-generation (BART/T5) models.

    Returns:
        (qar_tokenizer, qar_model, sas_tokenizer, sas_model); the retrieval pair is
        (None, None) when LOAD_DENSE_INDEX is False.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages plus their FAISS index, and an Elasticsearch client.

    Returns:
        (wiki40b_passages, wiki40b_gpu_index_flat, es_client); the first two are None
        when LOAD_DENSE_INDEX is False.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Unpack the cached resources into module-level names used by the helpers below.
# NOTE(review): the obfuscated original bound each tuple to a single `A__`, discarding
# the unpacking; the names here are reconstructed from the upstream ELI5 demo — confirm
# against the helper functions' references.
wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples nearest to `question`.

    Uses the retribert encoder plus the FAISS index over training questions.
    """
    query_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nearest_ids = eli5_train_q_index.search(query_rep, n_results)
    nn_examples = [eli5_train[int(idx)] for idx in nearest_ids[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve support passages for `question`.

    Args:
        question: user question text.
        source: "wiki40b" for Wikipedia snippets, or "none" for an empty context.
        method: "dense" (FAISS) or anything else for Elasticsearch.
        n_results: number of passages to retrieve.

    Returns:
        (question_doc, support_list): the seq2seq input string and the list of
        (article_title, section_title, score, passage_text) tuples.
    """
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        # Tensors and tokenizers are not hashable by streamlit; skip hashing them.
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def SCREAMING_SNAKE_CASE_(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    """Generate a long-form answer for `question_doc` with the seq2seq model.

    BUG FIX: the original parameters were all named `_UpperCAmelCase`
    (SyntaxError) and the generated answer was discarded into `_a`.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE(review): `support_list` is not defined in this function; the original
    # demo returned the module-level global of that name — confirm.
    return (answer, support_list)
# --- Page title and sidebar logo banner ------------------------------------
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
# NOTE(review): the obfuscation collapsed the original assignment targets to
# `A__`; the template below reads `header_html` and the markdown call reads
# `header_full`, which these two assignments were presumably bound to — confirm.
A__: Optional[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
A__: Optional[int] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
    header_html,
)
# Render the logo banner at the top of the sidebar.
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
# NOTE(review): every assignment target in this script section was obfuscated
# to `A__`; the names read below (description, action_list, demo_options,
# action_st, show_type, retriever_info, generate_info, sampled, ...) are the
# ones the assignments were originally bound to — confirm and restore.
A__: List[str] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
# The four display modes the user can pick from (index into this list = `action`).
A__: str = [
    '''Answer the question''',
    '''View the retrieved document only''',
    '''View the most similar ELI5 question and answer''',
    '''Show me everything, please!''',
]
# Sidebar: demo display options (which panels to show).
A__: Optional[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
    A__: Dict = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    A__: Optional[Any] = action_list.index(action_st)
    A__: List[Any] = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    A__: str = show_type == '''Show full text of passages'''
else:
    # Defaults when the options panel is hidden: show everything, full text.
    A__: Dict = 3
    A__: int = True
# Sidebar: retrieval options (which Wikipedia source and index to query).
A__: Dict = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    A__: Optional[int] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
    st.sidebar.markdown(retriever_info)
    A__: int = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    A__: int = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    A__: List[Any] = '''wiki40b'''
    A__: Optional[int] = '''dense'''
# Generation defaults, overridden below when "Generation options" is checked.
A__: int = '''beam'''
A__: Dict = 2
A__: Tuple = 64
A__: Any = 256
A__: List[Any] = None
A__: int = None
A__: Optional[Any] = st.sidebar.checkbox('''Generation options''')
if generate_options:
    A__: Optional[int] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
    st.sidebar.markdown(generate_info)
    A__: int = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    A__: Any = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    A__: Tuple = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        A__: List[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        A__: Union[str, Any] = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        A__: Dict = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        A__: Union[str, Any] = None
# start main text
# Canned example questions; the first entry is a sentinel that switches the UI
# to a free-text input box.
# NOTE(review): targets obfuscated to `A__`; reads below expect
# `questions_list`, `question_s` and `question` — confirm and restore.
A__: Union[str, Any] = [
    '''<MY QUESTION>''',
    '''How do people make chocolate?''',
    '''Why do we get a fever when we are sick?''',
    '''How can different animals perceive different colors?''',
    '''What is natural language processing?''',
    '''What\'s the best way to treat a sunburn?''',
    '''What exactly are vitamins ?''',
    '''How does nuclear energy provide electricity?''',
    '''What\'s the difference between viruses and bacteria?''',
    '''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
    '''Why do people like drinking coffee even though it tastes so bad?''',
    '''What happens when wine ages? How does it make the wine taste better?''',
    '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
    '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
    '''How does New Zealand have so many large bird predators?''',
]
A__: Union[str, Any] = st.selectbox(
    '''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    A__: List[Any] = st.text_input('''Enter your question here:''', '''''')
else:
    A__: Any = question_s
# Main action: run retrieval and/or generation for the chosen question.
# NOTE(review): assignment targets obfuscated to `A__`; reads below expect
# `support_list_dense`, `support_list_sparse`, `support_list`, `question_doc`,
# `answer`, `wiki_url`, `sec_titles`, `sec_list`, `sections`, `nn_train_list`,
# `train_exple`, `answers_st` — confirm and restore.
if st.button('''Show me!'''):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Query both retrievers and interleave their deduplicated results.
            A__: Tuple = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            A__: Tuple = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            A__: List[Any] = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            A__: Dict = support_list[:10]
            A__: List[Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            A__: Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        # Generate and display the long-form answer.
        A__: Dict = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == '''sampled'''),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('''### The model generated answer is:''')
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        # Show the retrieved passages with links back to Wikipedia sections.
        st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
        for i, res in enumerate(support_list):
            A__: Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            A__: Dict = res[1].strip()
            if sec_titles == "":
                A__: Any = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                A__: List[Any] = sec_titles.split(''' & ''')
                A__: List[str] = ''' & '''.join(
                    ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
                )
            st.markdown(
                '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
                )
    if action in [2, 3]:
        # Show the most similar ELI5 training question and its top answers.
        A__: Any = find_nearest_training(question)
        A__: Dict = nn_train_list[0]
        st.markdown(
            '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
        )
        A__: Optional[Any] = [
            '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
            for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
            if i == 0 or sc > 2
        ]
        st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
# Footer disclaimer rendered at the bottom of the sidebar.
# NOTE(review): target obfuscated to `A__`; the markdown call reads `disclaimer`.
A__: str = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 704 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# BUG FIX: the logger was assigned to the obfuscated name `A__` while the rest
# of this script reads `logger` — restore the real name.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# BUG FIX: the accumulator was obfuscated to `A__`, while every statement below
# appends to `rename_keys` — restore the name so the list is actually built.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def SCREAMING_SNAKE_CASE_(state_dict, old, new):
    """Rename `old` key to `new` in `state_dict`, in place.

    BUG FIX: the original obfuscation gave all three parameters the same name
    (a SyntaxError) and never stored the popped value back under the new key.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def SCREAMING_SNAKE_CASE_(state_dict):
    """Return a copy of `state_dict` with the timm backbone prefix renamed.

    Keys containing "backbone.0.body" are mapped to the HF
    "backbone.conv_encoder.model" prefix; all other keys are kept as-is.

    BUG FIX: the obfuscation discarded the renamed key into `_a` instead of
    inserting it into `new_state_dict`.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def SCREAMING_SNAKE_CASE_(state_dict):
    """Split each fused attention `in_proj` tensor into q/k/v projections, in place.

    PyTorch's MultiheadAttention stores query/key/value input projections as one
    stacked (3*256, 256) matrix + (3*256,) bias; the HF model expects separate
    q_proj / k_proj / v_proj tensors, so each fused tensor is sliced into thirds.

    BUG FIX: the obfuscation assigned every slice to `_a` and discarded it; the
    slices must be written back into `state_dict` under the HF key names.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of the fused input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def SCREAMING_SNAKE_CASE_(image, checkpoint_url):
    """Resize a PIL image so its longest side matches the model's expected size.

    Detection checkpoints use a max size of 800, structure-recognition
    checkpoints use 1000; the aspect ratio is preserved.

    BUG FIX: the obfuscation gave both parameters the same name (SyntaxError)
    and discarded width/height/scale into `_a`.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def SCREAMING_SNAKE_CASE_(image):
    """Convert a PIL image to a tensor and normalize it with ImageNet statistics.

    BUG FIX: both transform results were discarded into `_a`; thread the image
    through `to_tensor` and `normalize` and return the final tensor.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE_(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the original state dict, renames/splits its tensors, loads them
    into a `TableTransformerForObjectDetection`, verifies the outputs on an
    example image, and optionally saves the model and/or pushes it to the hub.

    BUG FIX: the obfuscation discarded almost every intermediate value into
    `_a` (state dict, config, model, processor, expected tensors), leaving the
    function a chain of NameErrors; the intended assignments are restored.
    """
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # BUG FIX: parser and parsed args were assigned to the obfuscated name
    # `A__`, so `parser.add_argument(...)` and `args.checkpoint_url` raised
    # NameError — restore the real names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 506 | 0 |
import math
import os
import sys
def _lowerCAmelCase(file_path):
    """Read `file_path` and return its contents as a string of '0'/'1' bits.

    Each byte is rendered as its zero-padded 8-bit binary representation.
    Exits the process with an error message if the file cannot be read.

    BUG FIX: the obfuscation assigned the file data and each byte string to
    throwaway locals while the loop read `data` and `curr_byte`.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def _lowerCAmelCase(lexicon, curr_string, index, last_match_id):
    """Grow the LZW decode lexicon in place after a successful match.

    Replaces `curr_string` with the two extensions `curr_string + "0"` and
    `curr_string + "1"`; when `index` reaches a power of two, every existing
    code is widened by a leading "0" so all codes keep the same bit length.

    BUG FIX: parameters were all named `__lowerCAmelCase` (SyntaxError), the
    new lexicon entries were discarded into locals, and `math.loga` is not a
    real function — the intended call is `math.log2`.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def _lowerCAmelCase(data_bits):
    """Compress a '0'/'1' bit string with the LZW scheme and return the codes.

    BUG FIX: the obfuscation discarded `lexicon`, `index` and each matched code
    into throwaway locals while later statements read those names; the
    intended bindings are restored. Logic is unchanged.

    NOTE(review): the helper names below (`add_key_to_lexicon`) are the
    intended ones; in this file every def was mangled to `_lowerCAmelCase`.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush any trailing partial match by zero-padding until a code matches.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def _lowerCAmelCase(source_path, compressed):
    """Prefix `compressed` with the source file's length, self-delimited.

    The length is written in binary, preceded by (bit-length - 1) zeros so the
    decoder can recover where the length field ends.

    BUG FIX: parameters shared one mangled name (SyntaxError) and the
    intermediate values were discarded into throwaway locals.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def _lowerCAmelCase(file_path, to_write):
    """Write a '0'/'1' bit string to `file_path` as raw bytes.

    The bit string is split into 8-bit groups; a "1" followed by zeros pads the
    final group (or forms an extra byte when the input is byte-aligned) so the
    decoder can strip the padding unambiguously.

    BUG FIX: parameters shared one mangled name (SyntaxError) and
    `byte_length` / `result_byte_array` were assigned to throwaway locals.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def _lowerCAmelCase(source_path, destination_path):
    """Compress `source_path` with LZW and write the result to `destination_path`.

    BUG FIX: parameters shared one mangled name (SyntaxError) and each
    pipeline stage's result was discarded into a throwaway local.

    NOTE(review): the helper names below are the intended ones; in this file
    every def was mangled to `_lowerCAmelCase`, as was the `compress` entry
    point called from the __main__ guard.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
def _lowerCAmelCase(a, b):
    """Return a**b via recursive exponentiation by squaring.

    BUG FIX: both parameters were named `__lowerCAmelCase` (a SyntaxError) and
    the body read the intended names `a` and `b`; the recursive calls are made
    through this function's own (mangled) name so it is self-contained.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return _lowerCAmelCase(a, int(b / 2)) * _lowerCAmelCase(a, int(b / 2))
    else:
        return a * _lowerCAmelCase(a, int(b / 2)) * _lowerCAmelCase(a, int(b / 2))
def _lowerCAmelCase(a, b):
    """Return a**b for any integer exponent, delegating to `actual_power`.

    For b < 0 the reciprocal is returned; `int(b / 2)` inside `actual_power`
    truncates toward zero, so the negative exponent still terminates at b == 0.

    BUG FIX: parameters shared one mangled name (SyntaxError); the body read
    the intended `a` and `b`. NOTE(review): `actual_power` and `power` are the
    intended names of the mangled defs in this file.
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
'''simple docstring'''
def __lowercase(num):
    """Return True if the non-negative integer `num` reads the same reversed.

    Negative numbers are never palindromes.

    BUG FIX: the parameter was mangled to `_lowercase` while the body read
    `num`, and the reversed accumulator / copy were discarded into throwaway
    locals instead of `rev_num` / `num_copy`.
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed directly.
    doctest.testmod()
| 483 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ :Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ :List[Any] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):  # NOTE(review): base was mangled to `lowerCAmelCase_`; PretrainedConfig is the import above
    """Configuration class for a SEW-D model (mangled class name preserved).

    Stores the architecture hyper-parameters plus the SpecAugment / CTC /
    classification fine-tuning options, mirroring ``transformers.SEWDConfig``.

    BUG FIX: every `__init__` parameter was mangled to the same name `A__`
    (a SyntaxError) and every attribute was assigned to a local
    `__lowerCamelCase` instead of `self.<name>`; the intended parameter names
    and `self` assignments are restored from the defaults' order.
    """

    model_type = "sew-d"  # NOTE(review): attr was mangled to `snake_case__`; PretrainedConfig reads `model_type`

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def a_(self):
        """Overall downsampling stride of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 483 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowercase_(BackboneConfigMixin, PretrainedConfig):  # NOTE(review): bases were both mangled to `_lowerCAmelCase`; these are the two names imported above
    """Configuration for a Dinat (Dilated Neighborhood Attention Transformer) model.

    BUG FIX: both class attributes were mangled to the same name
    `__magic_name__` (the second silently overwrote the first) — restored to
    `model_type` / `attribute_map`, which PretrainedConfig reads. Every
    `__init__` parameter shared the mangled name `_lowercase` (SyntaxError)
    and every attribute was assigned to a local `lowerCAmelCase__` instead of
    `self.<name>`; the intended names are restored from the defaults' order.
    """

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 308 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# Module-level logger for this training script.
UpperCamelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( _lowerCAmelCase ):
    """Lightning module that fine-tunes a transformer on a GLUE task.

    NOTE(review): machine-mangled. Most locals were renamed to ``A__`` (so
    intermediate results such as ``hparams``, ``outputs``, ``loss``,
    ``features`` are read but never bound), every method shares the name
    ``snake_case__`` (later defs shadow earlier ones), and several signatures
    repeat ``SCREAMING_SNAKE_CASE__`` as a parameter name, which is a
    duplicate-argument SyntaxError. Documented as-is; restore from upstream
    before use.
    """

    # Task mode string handed to the BaseTransformer constructor.
    A__ : List[Any] = "sequence-classification"

    def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Any:
        # Accept hyper-parameters as either a plain dict or a Namespace.
        if type(SCREAMING_SNAKE_CASE__ ) == dict:
            A__ = Namespace(**SCREAMING_SNAKE_CASE__ )
        # Look up the task's output mode (classification/regression) and label count.
        A__ = glue_output_modes[hparams.task]
        A__ = glue_tasks_num_labels[hparams.task]
        super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.mode )

    def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Dict:
        # Forward: delegate straight to the wrapped transformer model.
        return self.model(**SCREAMING_SNAKE_CASE__ )

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        # Training step. NOTE(review): duplicate parameter name -> SyntaxError.
        A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        # distilbert/bart take no token_type_ids; others only for bert-family models.
        if self.config.model_type not in ["distilbert", "bart"]:
            A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        A__ = self(**SCREAMING_SNAKE_CASE__ )
        A__ = outputs[0]
        A__ = self.trainer.lr_schedulers[0]["scheduler"]
        A__ = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def snake_case__ ( self ) -> List[str]:
        # Build (or reuse) cached feature files for the train and dev splits.
        A__ = self.hparams
        A__ = processors[args.task]()
        A__ = processor.get_labels()
        for mode in ["train", "dev"]:
            A__ = self._feature_file(SCREAMING_SNAKE_CASE__ )
            if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , SCREAMING_SNAKE_CASE__ )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                A__ = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir )
                )
                A__ = convert_examples_to_features(
                    SCREAMING_SNAKE_CASE__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("Saving features into cached file %s" , SCREAMING_SNAKE_CASE__ )
                torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> DataLoader:
        # Load cached features and wrap them in a DataLoader.
        # NOTE(review): duplicate parameter names -> SyntaxError.
        A__ = "dev" if mode == "test" else mode
        A__ = self._feature_file(SCREAMING_SNAKE_CASE__ )
        logger.info("Loading features from cached file %s" , SCREAMING_SNAKE_CASE__ )
        A__ = torch.load(SCREAMING_SNAKE_CASE__ )
        A__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        A__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        A__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        # Labels are long for classification, float for regression.
        if self.hparams.glue_output_mode == "classification":
            A__ = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            A__ = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , batch_size=SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , )

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
        # Validation step: collect loss, logits and gold labels for epoch-end metrics.
        A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        A__ = self(**SCREAMING_SNAKE_CASE__ )
        A__ , A__ = outputs[:2]
        A__ = logits.detach().cpu().numpy()
        A__ = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> tuple:
        # Aggregate per-batch outputs into GLUE metrics.
        A__ = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
        A__ = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            A__ = np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            A__ = np.squeeze(SCREAMING_SNAKE_CASE__ )
        A__ = np.concatenate([x["target"] for x in outputs] , axis=0 )
        A__ = [[] for _ in range(out_label_ids.shape[0] )]
        A__ = [[] for _ in range(out_label_ids.shape[0] )]
        A__ = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
        A__ = dict(results.items() )
        A__ = results
        return ret, preds_list, out_label_list

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> dict:
        # Validation epoch end: expose metrics to the logger/progress bar.
        A__ , A__ , A__ = self._eval_end(SCREAMING_SNAKE_CASE__ )
        A__ = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> dict:
        # Test epoch end: same aggregation, renamed loss key.
        A__ , A__ , A__ = self._eval_end(SCREAMING_SNAKE_CASE__ )
        A__ = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def snake_case__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        # Register GLUE-specific CLI flags on top of the generic BaseTransformer ones.
        BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=SCREAMING_SNAKE_CASE__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--task" , default="" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="The GLUE task to run" , )
        parser.add_argument(
            "--gpus" , default=0 , type=SCREAMING_SNAKE_CASE__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
def _lowerCamelCase ( ) -> str:
    """CLI entry point: parse args, train the GLUE model, optionally test.

    NOTE(review): machine-mangled — every assignment targets ``A__`` while the
    body reads ``UpperCAmelCase_``, ``parser``, ``args``, ``GLUETransformer``,
    ``model``, ``trainer`` and ``checkpoints``, none of which are bound here.
    """
    A__ = argparse.ArgumentParser()
    add_generic_args(UpperCAmelCase_, os.getcwd() )
    A__ = GLUETransformer.add_model_specific_args(UpperCAmelCase_, os.getcwd() )
    A__ = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        A__ = os.path.join(
            "./results", F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""", )
        os.makedirs(args.output_dir )
    A__ = GLUETransformer(UpperCAmelCase_ )
    A__ = generic_train(UpperCAmelCase_, UpperCAmelCase_ )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        # Pick the most recent epoch checkpoint for testing.
        A__ = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt" ), recursive=UpperCAmelCase_ ) )
        A__ = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(UpperCAmelCase_ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the entry point above
    # is named `_lowerCamelCase` — so running this module raises NameError.
    main()
| 104 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Emit INFO-level logs during conversion and create a module-level logger.
logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
def __UpperCamelCase( _A ):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Parameters:
        _A: path to the checkpoint file; the weights may be nested under a
            top-level ``"model"`` key.

    Returns:
        dict: a flat state dict with fairseq-only tensors dropped, layer-norm /
        projection keys renamed to the HF OPT layout, and every fused
        ``qkv_proj`` weight split into separate q/k/v projections.

    Fixes vs. the original block: the nested dict is unwrapped in place instead
    of re-reading the file from disk; renamed keys now pop ``old_key`` (the
    original popped the *path* argument, a guaranteed KeyError); the q/k/v
    split results are actually assigned into the state dict (the original
    referenced unbound names ``q``/``k``/``v``); broken ``typing`` annotations
    (``Union`` etc. are not imported in this file) were removed.
    """
    sd = torch.load(_A , map_location='cpu' )
    if "model" in sd.keys():
        # Unwrap in place — no need to reload the checkpoint from disk.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.' , '.q_proj.' )
            k_name = key.replace('.qkv_proj.' , '.k_proj.' )
            v_name = key.replace('.qkv_proj.' , '.v_proj.' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def __UpperCamelCase( _A : int , _A : Any , _A : List[str]=None ):
    """Convert a metaseq OPT checkpoint into a half-precision OPTModel and save it.

    NOTE(review): machine-mangled — the parameter list repeats ``_A``
    (duplicate-argument SyntaxError), and the body reads ``load_checkpoint``,
    ``config`` and ``model``, none of which are bound under those names here.
    Intended flow (confirm against upstream): load+normalize the state dict,
    build an OPTConfig (from file or defaults), load the weights into an
    OPTModel, and save it to the dump folder.
    """
    UpperCAmelCase__ : List[Any] = load_checkpoint(_A )
    if config is not None:
        UpperCAmelCase__ : Optional[Any] = OPTConfig.from_pretrained(_A )
    else:
        UpperCAmelCase__ : Union[str, Any] = OPTConfig()
    UpperCAmelCase__ : Dict = OPTModel(_A ).half().eval()
    model.load_state_dict(_A )
    # Check results
    Path(_A ).mkdir(exist_ok=_A )
    model.save_pretrained(_A )
if __name__ == "__main__":
    # NOTE(review): the parser is assigned to `UpperCamelCase__`, so the
    # `parser`, `args` and `convert_opt_checkpoint` names used below are
    # undefined at runtime (the converter above is named `__UpperCamelCase`).
    UpperCamelCase__ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    UpperCamelCase__ : int = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 496 | '''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( lowerCAmelCase ):
    """Processor pairing a CLIP image processor with a CLIP tokenizer.

    NOTE(review): machine-mangled — locals were collapsed to
    ``UpperCAmelCase__`` (so ``feature_extractor``, ``encoding`` and
    ``image_features`` are read but never bound), the three class attributes
    all share the name ``UpperCAmelCase_`` (the last assignment wins), and the
    five trailing methods all share the name ``lowerCAmelCase__`` (each def
    shadows the previous). Documented as-is; restore from upstream before use.
    """

    UpperCAmelCase_ : Tuple = ['''image_processor''', '''tokenizer''']
    UpperCAmelCase_ : str = '''CLIPImageProcessor'''
    UpperCAmelCase_ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')

    def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> Any:
        """Wire up image processor and tokenizer, honouring the deprecated
        ``feature_extractor`` kwarg.

        NOTE(review): duplicate parameter names -> SyntaxError.
        """
        UpperCAmelCase__ : Any = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,lowerCamelCase_ ,)
            UpperCAmelCase__ : str = kwargs.pop('''feature_extractor''' )
        UpperCAmelCase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCamelCase_ ,lowerCamelCase_ )

    def __call__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ,**lowerCamelCase_ ) -> Dict:
        """Tokenize text and/or preprocess images; at least one must be given.

        NOTE(review): duplicate parameter names -> SyntaxError; ``encoding``
        and ``image_features`` are read but never bound below.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            UpperCAmelCase__ : Tuple = self.tokenizer(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
        if images is not None:
            UpperCAmelCase__ : Optional[int] = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ )
        if text is not None and images is not None:
            UpperCAmelCase__ : str = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCamelCase_ ) ,tensor_type=lowerCamelCase_ )

    def lowerCAmelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Tuple:
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*lowerCamelCase_ ,**lowerCamelCase_ )

    def lowerCAmelCase__ ( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Tuple:
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*lowerCamelCase_ ,**lowerCamelCase_ )

    @property
    def lowerCAmelCase__ ( self ) -> List[str]:
        """Union of tokenizer and image-processor model input names (deduplicated)."""
        UpperCAmelCase__ : Optional[Any] = self.tokenizer.model_input_names
        UpperCAmelCase__ : Optional[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def lowerCAmelCase__ ( self ) -> List[Any]:
        """Deprecated alias for the image-processor class attribute."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,lowerCamelCase_ ,)
        return self.image_processor_class

    @property
    def lowerCAmelCase__ ( self ) -> List[str]:
        """Deprecated alias for the image processor instance."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,lowerCamelCase_ ,)
        return self.image_processor
| 496 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Marker strings that signal the end of a generated function body
# (used to stop generation and to truncate completions).
SCREAMING_SNAKE_CASE_: Union[str, Any] =['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __A ( UpperCamelCase__ ):
    """Iterable dataset that tokenizes HumanEval prompts and yields each task
    ``n_copies`` times (ids, task_id, unpadded input length).

    NOTE(review): machine-mangled — both signatures repeat ``__a``
    (duplicate-argument SyntaxError) and locals/attributes were collapsed to
    ``UpperCAmelCase_`` (so ``prompts``/``outputs`` and ``self.n_tasks`` etc.
    are read but never bound).
    """

    def __init__(self : Optional[Any] , __a : str , __a : Optional[Any] , __a : int=None , __a : str=1 ):
        UpperCAmelCase_ = tokenizer
        UpperCAmelCase_ = dataset
        UpperCAmelCase_ = len(__a ) if n_tasks is None else n_tasks
        UpperCAmelCase_ = n_copies

    def __iter__(self : List[Any] ):
        UpperCAmelCase_ = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        UpperCAmelCase_ = self.tokenizer(__a , padding=__a , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class __A ( UpperCamelCase__ ):
    """Stopping criterion: halt generation once every decoded continuation
    contains one of the end-of-function marker strings.

    NOTE(review): machine-mangled — ``__init__`` repeats ``__a``
    (duplicate-argument SyntaxError) and attributes are assigned to the bare
    name ``UpperCAmelCase_`` instead of ``self.…``; this class also shadows
    the ``__A`` defined just above.
    """

    def __init__(self : List[Any] , __a : Any , __a : Any , __a : Dict ):
        UpperCAmelCase_ = start_length
        UpperCAmelCase_ = eof_strings
        UpperCAmelCase_ = tokenizer

    def __call__(self : Dict , __a : List[Any] , __a : int , **__a : Dict ):
        # Decode only the newly generated suffix, then check every sequence
        # for at least one EOF marker.
        UpperCAmelCase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        UpperCAmelCase_ = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(__a )
def lowerCAmelCase_ ( snake_case_ , eof_strings=("\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif") ):
    """Truncate a model-generated completion at the end-of-function markers.

    The text is split on the marker strings (with a capturing group so the
    markers survive the split); the last matched marker and everything after
    it are dropped, so only the code before it is returned. If no marker is
    present, the result is the empty string.

    Parameters:
        snake_case_: the generated completion to truncate.
        eof_strings: marker strings delimiting the end of a function body
            (defaults mirror the module-level ``SCREAMING_SNAKE_CASE_`` list).

    Fixes vs. the original block: the regex was built by joining the
    characters of the *input string itself* — nonsense semantics and an
    ``re.error`` crash whenever the input contained a regex metacharacter —
    instead of the EOF-marker list; it is now parameterized over the markers
    (backward-compatible default). The ``typing`` annotations, which
    referenced names this file never imports, were removed.
    """
    parts = re.split("(%s)" % "|".join(eof_strings ) , snake_case_ )
    # last string should be ""
    return "".join(parts[:-2] )
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple=20 , **snake_case_ : Any ) -> Tuple:
    """Generate completions for every HumanEval task and group them by task id.

    NOTE(review): machine-mangled — the signature repeats ``snake_case_`` six
    times (duplicate-argument SyntaxError) and locals were collapsed to
    ``UpperCAmelCase_`` (so ``gen_token_dict``, ``generated_tokens``,
    ``code_gens`` etc. are read but never bound); ``tokenizer`` and
    ``remove_last_block`` are also referenced without being parameters.
    This function name shadows the one defined just above.
    """
    UpperCAmelCase_ = defaultdict(snake_case_ )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(snake_case_ ) ):
        with torch.no_grad():
            UpperCAmelCase_ = batch["ids"].shape[-1]
            UpperCAmelCase_ = accelerator.unwrap_model(snake_case_ ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=snake_case_ , **snake_case_ )
            # each task is generated batch_size times
            UpperCAmelCase_ = batch["task_id"].repeat(snake_case_ )
            UpperCAmelCase_ = accelerator.pad_across_processes(
                snake_case_ , dim=1 , pad_index=tokenizer.pad_token_id )
            UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((generated_tokens, generated_tasks) )
            UpperCAmelCase_ = generated_tokens.cpu().numpy()
            UpperCAmelCase_ = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(snake_case_ , snake_case_ ):
                gen_token_dict[task].append(snake_case_ )
    UpperCAmelCase_ = [[] for _ in range(snake_case_ )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            UpperCAmelCase_ = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
            code_gens[task].append(remove_last_block(snake_case_ ) )
    return code_gens
def lowerCAmelCase_ ( ) -> Optional[Any]:
    """Run HumanEval: generate completions with an HF causal LM under
    Accelerate, then score them with the ``code_eval`` metric.

    NOTE(review): machine-mangled — every assignment targets
    ``UpperCAmelCase_`` while the body reads ``parser``, ``args``,
    ``tokenizer``, ``model``, ``human_eval``, ``code_eval_metric`` and
    friends, none of which are bound; this def also shadows the two
    ``lowerCAmelCase_`` functions above.
    """
    UpperCAmelCase_ = HfArgumentParser(snake_case_ )
    UpperCAmelCase_ = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    UpperCAmelCase_ = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    UpperCAmelCase_ = "false"
    if args.num_workers is None:
        UpperCAmelCase_ = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    UpperCAmelCase_ = Accelerator()
    set_seed(args.seed , device_specific=snake_case_ )
    # Load model and tokenizer
    UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
    UpperCAmelCase_ = tokenizer.eos_token
    UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    UpperCAmelCase_ = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , snake_case_ , snake_case_ )] ),
    }
    # Load evaluation dataset and metric
    UpperCAmelCase_ = load_dataset("openai_humaneval" )
    UpperCAmelCase_ = load_metric("code_eval" )
    UpperCAmelCase_ = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    UpperCAmelCase_ = args.n_samples // args.batch_size
    UpperCAmelCase_ = TokenizedDataset(snake_case_ , human_eval["test"] , n_copies=snake_case_ , n_tasks=snake_case_ )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        UpperCAmelCase_ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception
    UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
    UpperCAmelCase_ = complete_code(
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , n_tasks=snake_case_ , batch_size=args.batch_size , **snake_case_ , )
    if accelerator.is_main_process:
        UpperCAmelCase_ = []
        for task in tqdm(range(snake_case_ ) ):
            UpperCAmelCase_ = human_eval["test"][task]["test"]
            UpperCAmelCase_ = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        UpperCAmelCase_ , UpperCAmelCase_ = code_eval_metric.compute(
            references=snake_case_ , predictions=snake_case_ , num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file , "w" ) as fp:
            json.dump(snake_case_ , snake_case_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # NOTE(review): `main` is not defined here — the entry point above is
    # named `lowerCAmelCase_` — so running this module raises NameError.
    main()
# NOTE(review): the "| 78 | " prefix fused onto the next line is extraction
# residue; the header should read `class a__ :`. The class models a flow
# network over an adjacency-matrix graph, but it is machine-mangled:
# attribute assignments target the bare name `SCREAMING_SNAKE_CASE_` instead
# of `self.…`, and several parameter lists repeat `_A` (duplicate-argument
# SyntaxError).
| 78 | class a__ :
    def __init__( self : int,_A : Union[str, Any],_A : Dict,_A : List[str] ):
        """Wrap an adjacency-matrix graph with source and sink vertices.

        NOTE(review): duplicate ``_A`` parameters (SyntaxError); ``graph`` is
        read but never bound under that name.
        """
        SCREAMING_SNAKE_CASE_ : Tuple = None
        SCREAMING_SNAKE_CASE_ : int = None
        SCREAMING_SNAKE_CASE_ : str = graph
        self._normalize_graph(_A,_A )
        SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None

    def __UpperCamelCase ( self : Any,_A : str,_A : str ):
        """Collapse multiple sources/sinks into a single super-source and
        super-sink by inserting fake vertices with max-input-flow edges.

        NOTE(review): duplicate ``_A`` parameters (SyntaxError); ``sources``
        and ``sinks`` are read but never bound; ``sources is int`` compares
        identity against the type object, not ``isinstance``.
        """
        if sources is int:
            SCREAMING_SNAKE_CASE_ : Dict = [sources]
        if sinks is int:
            SCREAMING_SNAKE_CASE_ : Optional[int] = [sinks]
        if len(_A ) == 0 or len(_A ) == 0:
            return
        SCREAMING_SNAKE_CASE_ : Dict = sources[0]
        SCREAMING_SNAKE_CASE_ : Dict = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(_A ) > 1 or len(_A ) > 1:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            # Prepend a super-source row/column …
            SCREAMING_SNAKE_CASE_ : Optional[Any] = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0,0 )
            self.graph.insert(0,[0] * size )
            for i in sources:
                SCREAMING_SNAKE_CASE_ : List[str] = max_input_flow
            SCREAMING_SNAKE_CASE_ : Any = 0
            # … and append a super-sink row/column.
            SCREAMING_SNAKE_CASE_ : Dict = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                SCREAMING_SNAKE_CASE_ : str = max_input_flow
            SCREAMING_SNAKE_CASE_ : str = size - 1

    def __UpperCamelCase ( self : str ):
        # Lazily run the configured algorithm and return its maximum flow.
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before." )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
        # Instantiate the executor class against this network.
        # NOTE(review): `algorithm` is read but the parameter is named `_A`.
        SCREAMING_SNAKE_CASE_ : List[str] = algorithm(self )
class a__ :
    """Base executor: caches references to a flow network's internals and runs
    the concrete algorithm exactly once.

    NOTE(review): machine-mangled — attribute assignments target the bare
    name ``SCREAMING_SNAKE_CASE_`` instead of ``self.…``, ``flow_network`` is
    read but the parameter is named ``_A``, and this class name ``a__`` is
    reused by every class below (later defs shadow earlier ones).
    """

    def __init__( self : List[str],_A : str ):
        SCREAMING_SNAKE_CASE_ : int = flow_network
        SCREAMING_SNAKE_CASE_ : str = flow_network.verticesCount
        SCREAMING_SNAKE_CASE_ : Dict = flow_network.sourceIndex
        SCREAMING_SNAKE_CASE_ : Any = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        SCREAMING_SNAKE_CASE_ : Optional[int] = flow_network.graph
        SCREAMING_SNAKE_CASE_ : List[Any] = False

    def __UpperCamelCase ( self : Optional[Any] ):
        # Run the concrete algorithm once; repeated calls are no-ops.
        if not self.executed:
            self._algorithm()
            SCREAMING_SNAKE_CASE_ : Dict = True

    def __UpperCamelCase ( self : Optional[Any] ):
        # Template hook: concrete executors override this with the real algorithm.
        pass
class a__ ( A__ ):
    """Executor that exposes the computed maximum flow after execution.

    NOTE(review): the base class ``A__`` is undefined in this file (the base
    executor above was renamed ``a__``), and the result field is assigned to
    the bare name ``SCREAMING_SNAKE_CASE_`` instead of ``self.maximum_flow``.
    """

    def __init__( self : Tuple,_A : Union[str, Any] ):
        super().__init__(_A )
        # use this to save your result
        SCREAMING_SNAKE_CASE_ : int = -1

    def __UpperCamelCase ( self : Any ):
        # Guard: the algorithm must have been executed before reading the result.
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!" )
        return self.maximum_flow
class a__ ( A__ ):
    """Push-relabel maximum-flow executor using the relabel-to-front rule.

    NOTE(review): machine-mangled — base ``A__`` is undefined, locals and
    attributes are assigned to the bare name ``SCREAMING_SNAKE_CASE_`` (so
    ``vertices_list``, ``vertex_index``, ``min_height`` etc. are read but
    never bound), and methods reference ``self.process_vertex`` /
    ``self.push`` / ``self.relabel`` although all methods here were renamed
    ``__UpperCamelCase``. Documented as-is.
    """

    def __init__( self : Optional[Any],_A : Optional[int] ):
        super().__init__(_A )
        # preflow[u][v]: flow pushed on edge (u, v); heights/excesses per vertex.
        SCREAMING_SNAKE_CASE_ : List[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )]
        SCREAMING_SNAKE_CASE_ : int = [0] * self.verticies_count
        SCREAMING_SNAKE_CASE_ : List[str] = [0] * self.verticies_count

    def __UpperCamelCase ( self : Optional[Any] ):
        # Main loop: saturate source edges, then process vertices in
        # relabel-to-front order until no vertex has positive excess.
        SCREAMING_SNAKE_CASE_ : Any = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        SCREAMING_SNAKE_CASE_ : str = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
        while i < len(_A ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = vertices_list[i]
            SCREAMING_SNAKE_CASE_ : Any = self.heights[vertex_index]
            self.process_vertex(_A )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0,vertices_list.pop(_A ) )
                SCREAMING_SNAKE_CASE_ : str = 0
            else:
                i += 1
        SCREAMING_SNAKE_CASE_ : List[Any] = sum(self.preflow[self.source_index] )

    def __UpperCamelCase ( self : Dict,_A : Tuple ):
        # Discharge a vertex: push along admissible edges, then relabel.
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(_A,_A )
            self.relabel(_A )

    def __UpperCamelCase ( self : int,_A : Optional[Any],_A : Optional[Any] ):
        # Push min(excess, residual capacity) along the edge and update excesses.
        SCREAMING_SNAKE_CASE_ : Optional[int] = min(
            self.excesses[from_index],self.graph[from_index][to_index] - self.preflow[from_index][to_index],)
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def __UpperCamelCase ( self : Tuple,_A : int ):
        # Relabel: lift the vertex just above its lowest admissible neighbour.
        SCREAMING_SNAKE_CASE_ : int = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                SCREAMING_SNAKE_CASE_ : int = self.heights[to_index]
        if min_height is not None:
            SCREAMING_SNAKE_CASE_ : Dict = min_height + 1
if __name__ == "__main__":
    # Demo: single source 0, single sink 3 on a 4-vertex cyclic graph.
    # NOTE(review): machine-mangled — all assignments target `__lowerCamelCase`,
    # so `graph`, `entrances`, `exits`, `flow_network` and `maximum_flow` are
    # undefined below, and `FlowNetwork` / `PushRelabelExecutor` do not exist
    # under those names (every class above was renamed `a__`).
    __lowerCamelCase : str = [0]
    __lowerCamelCase : str = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    __lowerCamelCase : Any = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    __lowerCamelCase : Dict = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    __lowerCamelCase : Optional[Any] = flow_network.find_maximum_flow()
    print(f'''maximum flow is {maximum_flow}''')
| 216 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
    """Formatter that converts Arrow-extracted rows/columns/batches into
    PyTorch tensors.

    NOTE(review): machine-mangled — several signatures use ``__lowerCAmelCase``
    for more than one parameter (duplicate-argument SyntaxError), all methods
    share the name ``__lowerCamelCase`` (later defs shadow earlier ones), and
    locals are collapsed to ``lowerCamelCase__`` (``value``, ``column``,
    ``batch`` etc. are read but never bound). Documented as-is.
    """

    def __init__( self , __lowerCAmelCase=None , **__lowerCAmelCase ):
        # Keep extra kwargs to forward to torch.tensor at conversion time.
        super().__init__(features=__lowerCAmelCase )
        lowerCamelCase__ = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Stack a list of same-shape/same-dtype tensors into one tensor;
        # otherwise return the column unchanged.
        import torch
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and column:
            if all(
                isinstance(__lowerCAmelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(__lowerCAmelCase )
        return column

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Convert a scalar/ndarray/PIL image to a torch tensor, defaulting
        # integer data to int64 and floating data to float32.
        import torch
        if isinstance(__lowerCAmelCase , (str, bytes, type(__lowerCAmelCase )) ):
            return value
        elif isinstance(__lowerCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        lowerCamelCase__ = {}
        if isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            lowerCamelCase__ = {'''dtype''': torch.intaa}
        elif isinstance(__lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            lowerCamelCase__ = {'''dtype''': torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(__lowerCAmelCase , PIL.Image.Image ):
                lowerCamelCase__ = np.asarray(__lowerCAmelCase )
        return torch.tensor(__lowerCAmelCase , **{**default_dtype, **self.torch_tensor_kwargs} )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Recursively tensorize nested structures (object arrays, lists, tuples).
        import torch
        # support for torch, tf, jax etc.
        if hasattr(__lowerCAmelCase , '''__array__''' ) and not isinstance(__lowerCAmelCase , torch.Tensor ):
            lowerCamelCase__ = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(__lowerCAmelCase , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
        elif isinstance(__lowerCAmelCase , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(__lowerCAmelCase ) for substruct in data_struct] )
        return self._tensorize(__lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Map the recursive tensorizer over an arbitrarily nested structure.
        return map_nested(self._recursive_tensorize , __lowerCAmelCase , map_list=__lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Format a single row: extract, decode, tensorize.
        lowerCamelCase__ = self.numpy_arrow_extractor().extract_row(__lowerCAmelCase )
        lowerCamelCase__ = self.python_features_decoder.decode_row(__lowerCAmelCase )
        return self.recursive_tensorize(__lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Format a single column: extract, decode, tensorize, consolidate.
        lowerCamelCase__ = self.numpy_arrow_extractor().extract_column(__lowerCAmelCase )
        lowerCamelCase__ = self.python_features_decoder.decode_column(__lowerCAmelCase , pa_table.column_names[0] )
        lowerCamelCase__ = self.recursive_tensorize(__lowerCAmelCase )
        lowerCamelCase__ = self._consolidate(__lowerCAmelCase )
        return column

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        # Format a whole batch: tensorize each column and consolidate in place.
        lowerCamelCase__ = self.numpy_arrow_extractor().extract_batch(__lowerCAmelCase )
        lowerCamelCase__ = self.python_features_decoder.decode_batch(__lowerCAmelCase )
        lowerCamelCase__ = self.recursive_tensorize(__lowerCAmelCase )
        for column_name in batch:
            lowerCamelCase__ = self._consolidate(batch[column_name] )
        return batch
| 29 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( lowerCAmelCase ):
    """Processor pairing a ClapFeatureExtractor with a Roberta tokenizer.

    NOTE(review): machine-mangled — both class attributes share the name
    ``lowerCAmelCase_`` (the second assignment wins), ``__call__`` and the
    decode helpers repeat ``__lowerCAmelCase`` as a parameter name
    (duplicate-argument SyntaxError), and locals such as ``encoding`` and
    ``audio_features`` are read but never bound. Documented as-is.
    """

    lowerCAmelCase_ = """ClapFeatureExtractor"""
    lowerCAmelCase_ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
        # NOTE(review): duplicate parameter names -> SyntaxError.
        super().__init__(__lowerCAmelCase , __lowerCAmelCase )

    def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
        """Tokenize text and/or extract audio features; at least one must be given."""
        lowerCamelCase__ = kwargs.pop('''sampling_rate''' , __lowerCAmelCase )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase__ = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if audios is not None:
            lowerCamelCase__ = self.feature_extractor(
                __lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        if text is not None and audios is not None:
            lowerCamelCase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )

    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )

    def __lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )

    @property
    def __lowerCamelCase ( self ):
        """Union of tokenizer and feature-extractor model input names (deduplicated)."""
        lowerCamelCase__ = self.tokenizer.model_input_names
        lowerCamelCase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29 | 1 |
"""simple docstring"""
import os
import sys
# Make the repository's src/ directory importable for the shortcuts below.
# NOTE(review): the joined path is assigned to `__UpperCAmelCase`, so the
# `SRC_DIR` read on the next line is undefined at runtime.
__UpperCAmelCase = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# torch.hub reads this module-level `dependencies` list to verify that the
# required packages are installed before loading any entry point.
dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoConfig."""
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoTokenizer."""
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoModel."""
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoModelForCausalLM."""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoModelForMaskedLM."""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoModelForSequenceClassification."""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    """torch.hub entry point: load a pretrained AutoModelForQuestionAnswering."""
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 65 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Return True if `phone` is a valid Sri Lankan mobile number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7X (X in 0-2,4-8),
    an optional separator (space or hyphen), then exactly seven digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    # Search the phone number against the compiled pattern (the original
    # mistakenly searched the input against itself).
    return bool(pattern.search(phone ) )


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 624 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test checking the TF mt5-small loss against a reference value."""

    @slow
    def test_small_integration_test( self ):
        # Reference value computed with the original T5 mesh-tensorflow code;
        # the model loss must match it within tolerance.
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )

        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids

        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()

        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
import math
def main() -> None:
    """Interactively encrypt or decrypt a message with a columnar transposition cipher."""
    message = input("""Enter message: """ )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("""Encryption/Decryption [e/d]: """ )

    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )


def encrypt_message(key: int , message: str ) -> str:
    """
    Encrypt `message` by reading every `key`-th character per column.

    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""""""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )


def decrypt_message(key: int , message: str ) -> str:
    """
    Decrypt a message produced by `encrypt_message` with the same key.

    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    # Cells in the grid that stay empty because the message does not fill it.
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""""""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        # Wrap to the next row, one column early once we reach the shaded boxes.
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI choice to the matching schedule factory;
# looked up by Seq2SeqTrainer._get_lr_scheduler below.
arg_to_scheduler = {
    """linear""": get_linear_schedule_with_warmup,
    """cosine""": get_cosine_schedule_with_warmup,
    """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
    """polynomial""": get_polynomial_decay_schedule_with_warmup,
    """constant""": get_constant_schedule,
    """constant_w_warmup""": get_constant_schedule_with_warmup,
}
class __SCREAMING_SNAKE_CASE( _lowerCAmelCase ):
    """
    Seq2seq-specific Trainer subclass (base is expected to be `transformers.Trainer`).

    Adds optimizer/scheduler creation driven by `--lr_scheduler` / `--adafactor`,
    optional label smoothing, generation during evaluation, and padding of
    generated tensors to a common max length.
    """

    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )

        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; use the target size.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                ' padding..' )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler( self , num_training_steps ):
        """Set up `self.optimizer` and `self.lr_scheduler` unless they were passed at init."""
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )

    def _get_lr_scheduler( self , num_training_steps ):
        """Build the LR scheduler selected via `--lr_scheduler` from `arg_to_scheduler`."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler

    def _get_train_sampler( self ):
        """Pick a sampler appropriate for the hardware / `--sortish_sampler` setting."""
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )

    def _compute_loss( self , model , inputs , labels ):
        """Return (loss, logits), honoring `--label_smoothing` and pad-token masking."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits

    def compute_loss( self , model , inputs ):
        labels = inputs.pop('labels' )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss

    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys=None , ):
        """Evaluation step; optionally generates sequences when `--predict_with_generate`."""
        inputs = self._prepare_inputs(inputs )

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['max_length'] )

        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['max_length'] )
        return (loss, logits, labels)

    def _pad_tensors_to_max_len( self , tensor , max_length ):
        """Right-pad `tensor` with the pad (or eos) token id up to `max_length`."""
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 328 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class __a ( PretrainedConfig ):
    """
    Configuration class for DeBERTa-v2 models. Defaults reproduce
    microsoft/deberta-v2-xlarge.
    """

    model_type = '''deberta-v2'''

    def __init__( self , vocab_size=128_100 , hidden_size=1_536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6_144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: older configs passed pos_att_type as a
        # "|"-separated string rather than a list.
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __a ( OnnxConfig ):
    """ONNX export configuration for DeBERTa-v2 models."""
    # NOTE(review): this class shares the obfuscated name `__a` with the config
    # class above and therefore shadows it at module level — confirm the
    # intended distinct class names against the upstream file.

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec; token_type_ids only exported when the model uses them."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )

    @property
    def default_onnx_opset( self ) -> int:
        return 12

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        # Drop token_type_ids when the model has no token-type embeddings.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 554 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Constants referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Return a mapping from every byte value (0-255) to a printable unicode
    character. Printable/latin bytes map to themselves; the remaining bytes
    are assigned code points starting at 256 so the BPE vocabulary avoids
    whitespace and control characters.
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """
    Return the set of adjacent symbol pairs in `word` (a tuple of symbols,
    which may be variable-length strings after BPE merges).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __magic_name__ ( PreTrainedTokenizer ):
    """
    BART tokenizer: GPT-2-style byte-level Byte-Pair-Encoding with
    RoBERTa-style special tokens (<s> ... </s> </s> ... </s> for pairs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )

        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        """Apply the learned BPE merges to a single pre-token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word

    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """Write vocab.json and merges.txt into `save_directory`; returns their paths."""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )

        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens , token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        # BART (like RoBERTa) does not use token type ids; return all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 717 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
_UpperCamelCase = "xlm-prophetnet"
_UpperCamelCase = ["past_key_values"]
_UpperCamelCase = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self , a__ = 0.1 , a__ = "gelu" , a__ = 3_05_22 , a__ = 10_24 , a__ = 40_96 , a__ = 12 , a__ = 16 , a__ = 40_96 , a__ = 12 , a__ = 16 , a__ = 0.1 , a__ = 0.1 , a__ = 5_12 , a__ = 0.02 , a__ = True , a__ = True , a__ = 0 , a__ = 2 , a__ = 32 , a__ = 1_28 , a__ = False , a__ = 0.0 , a__ = True , a__ = 0 , a__ = 1 , a__ = 2 , **a__ , ):
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = num_encoder_layers
_lowerCamelCase = num_encoder_attention_heads
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = num_decoder_layers
_lowerCamelCase = num_decoder_attention_heads
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = init_std # Normal(0, this parameter)
_lowerCamelCase = activation_function
# parameters for xlmprophetnet
_lowerCamelCase = ngram
_lowerCamelCase = num_buckets
_lowerCamelCase = relative_max_distance
_lowerCamelCase = disable_ngram_loss
_lowerCamelCase = eps
# 3 Types of Dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = dropout
_lowerCamelCase = use_cache
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , add_cross_attention=a__ , decoder_start_token_id=a__ , **a__ , )
@property
def _UpperCAmelCase ( self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCAmelCase ( self , a__ ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
| 297 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _snake_case ( _lowercase , unittest.TestCase ):
snake_case__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def lowerCamelCase__ ( self : Any , UpperCAmelCase : List[str]=0 ):
__lowerCamelCase : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(A_ ) )
__lowerCamelCase : Optional[int] = np.random.RandomState(A_ )
__lowerCamelCase : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.7_5,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : List[Any] = self.get_dummy_inputs()
__lowerCamelCase : List[Any] = pipe(**A_ ).images
__lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : Union[str, Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__lowerCamelCase : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs()
__lowerCamelCase : List[str] = pipe(**A_ ).images
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : Any = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__lowerCamelCase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
__lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs() )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs()
__lowerCamelCase : int = pipe(**A_ ).images
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : List[str] = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__lowerCamelCase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : int = self.get_dummy_inputs()
__lowerCamelCase : Dict = pipe(**A_ ).images
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : int = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
# Pipeline smoke test with the EulerAncestralDiscreteScheduler swapped in.
# NOTE(review): `A_` / `image_slice` / `expected_slice` are undefined —
# local names appear mangled; verify against the upstream test.
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__lowerCamelCase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : Dict = self.get_dummy_inputs()
__lowerCamelCase : str = pipe(**A_ ).images
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : List[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
# Pipeline smoke test with the DPMSolverMultistepScheduler swapped in.
# NOTE(review): `A_` / `image_slice` / `expected_slice` are undefined —
# local names appear mangled; verify against the upstream test.
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__lowerCamelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : Any = self.get_dummy_inputs()
__lowerCamelCase : str = pipe(**A_ ).images
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCamelCase : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
# Nightly integration tests: ONNX Stable Diffusion img2img on CUDA.
# NOTE(review): many names below (`options`, `A_`, `init_image`, `prompt`,
# `generator`, `image_slice`, `expected_slice`) are undefined — the local
# variable names appear mangled; confirm against the upstream diffusers
# test file before relying on this class.
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
# ONNX Runtime provider tuple: CUDA with a 15 GB arena cap.
@property
def lowerCamelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
# Session options with memory-pattern optimization disabled (presumably;
# the assignment target is mangled — TODO confirm).
@property
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[str] = ort.SessionOptions()
__lowerCamelCase : Any = False
return options
# img2img with the default (PNDM) scheduler on a 768x512 sketch image.
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowerCamelCase : Dict = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__lowerCamelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : List[Any] = "A fantasy landscape, trending on artstation"
__lowerCamelCase : Any = np.random.RandomState(0 )
__lowerCamelCase : Union[str, Any] = pipe(
prompt=A_ , image=A_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="np" , )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : str = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase : Optional[Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
# Same scenario with an explicit LMSDiscreteScheduler and 20 steps.
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowerCamelCase : List[str] = init_image.resize((768, 512) )
__lowerCamelCase : int = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
__lowerCamelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=A_ , safety_checker=A_ , feature_extractor=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
__lowerCamelCase : Optional[int] = "A fantasy landscape, trending on artstation"
__lowerCamelCase : Optional[Any] = np.random.RandomState(0 )
__lowerCamelCase : str = pipe(
prompt=A_ , image=A_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="np" , )
__lowerCamelCase : Union[str, Any] = output.images
__lowerCamelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__lowerCamelCase : Tuple = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger and tokenizer constants for XLM-RoBERTa.
# NOTE(review): every constant below is bound to the same name
# `_lowercase` — the original names (e.g. SPIECE_UNDERLINE,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) appear mangled; the class below
# references those original names. Confirm against upstream transformers.
_lowercase = logging.get_logger(__name__)
# SentencePiece word-boundary marker.
_lowercase = '''▁'''
# Expected vocab filename inside a checkpoint directory.
_lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Hub URLs of the SentencePiece models per checkpoint.
_lowercase = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
# Maximum model input lengths (positional embedding sizes) per checkpoint.
_lowercase = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
# XLM-RoBERTa slow tokenizer backed by SentencePiece (BPE model).
# NOTE(review): inside the methods every assignment target is the bare
# name `A` — the original attribute/local names (self.sp_model,
# self.vocab_file, self.fairseq_tokens_to_ids, ...) appear mangled, so
# attributes read later (e.g. self.sp_model) are never actually set here.
# Confirm against upstream transformers XLMRobertaTokenizer.
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Any = ['''input_ids''', '''attention_mask''']
# Load the SentencePiece model and set up the fairseq<->spm id offset.
def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A = 1
A = len(self.sp_model ) + self.fairseq_offset
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
# Pickling support: the SentencePieceProcessor itself is not picklable,
# so serialize the model proto instead.
def __getstate__( self : Union[str, Any] ) -> Any:
A = self.__dict__.copy()
A = None
A = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str ,A_ : str ) -> Optional[Any]:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
# Build model inputs: <s> A </s> (single) or <s> A </s></s> B </s> (pair).
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
# Mask marking special tokens (1) vs sequence tokens (0).
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
# XLM-R does not use token type ids: always return a zero list.
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
# Tokenize text into SentencePiece pieces.
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
# token -> id, honoring the fairseq alignment table and offset.
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
# id -> token, inverse of the mapping above.
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
# Join pieces back into a string, dropping the ▁ word-boundary marker.
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = ''.join(A_ ).replace(A_ ,' ' ).strip()
return out_string
# Copy (or re-serialize) the SentencePiece model into save_directory.
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
import math
import sys
def a(lowercase__):
    """Return the minimum number of perfect squares that sum to ``lowercase__``.

    Classic DP (Lagrange four-square style): ``answers[i]`` holds the minimum
    count of squares summing to ``i``.

    Fixed: the original bound every intermediate value (DP table, running
    minimum, square root bound) to the same throwaway local name, so
    ``answers`` was undefined and any call raised NameError.

    :param lowercase__: a natural number (non-negative integer).
    :return: minimum number of perfect squares summing to the input.
    :raises ValueError: if the input is not integral or is negative.

    Note: an input of 0 returns 1 — quirk preserved from the original code.
    """
    if lowercase__ != int(lowercase__):
        raise ValueError('the value of input must be a natural number')
    if lowercase__ < 0:
        raise ValueError('the value of input must not be a negative number')
    if lowercase__ == 0:
        return 1
    number = int(lowercase__)
    # answers[i] = minimum number of squares summing to i; 0 needs 0 squares.
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # Use one square (j*j) plus the best decomposition of the rest.
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
# Run the module's doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
from collections import defaultdict
def a(first_str, second_str):
    """Return True when the two strings are anagrams of each other.

    Comparison is case-insensitive and ignores all spaces.

    Fixed: the original signature declared the same parameter name twice
    (a SyntaxError), its body referenced undefined names, and it built
    ``defaultdict`` with a non-callable default factory (a string), which
    would raise TypeError on the first missing key. Counts now default to 0
    via ``defaultdict(int)``.

    :param first_str: first candidate string.
    :param second_str: second candidate string.
    :return: True if the strings are anagrams, else False.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # Increment per character of the first string, decrement per character
    # of the second; anagrams cancel out to all zeros.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())


# Public alias matching the name used by the __main__ block below.
check_anagrams = a
# Interactive demo: read two strings and report whether they are anagrams.
# NOTE(review): `check_anagrams`, `input_a`, `input_b` and `status` are
# undefined here — the function above is named `a` and both inputs are
# bound to the same name `A`; the original variable names appear mangled.
if __name__ == "__main__":
from doctest import testmod
testmod()
A = input('Enter the first string ').strip()
A = input('Enter the second string ').strip()
A = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
import numpy as np
def lowerCamelCase__(_a):
    """Logistic sigmoid: ``1 / (1 + exp(-x))``, elementwise for arrays.

    :param _a: scalar or numpy array.
    :return: sigmoid of the input, same shape as the input.
    """
    return 1 / (1 + np.exp(-_a))


# Fixed: the second definition below shadows the first under the same
# (mangled) name and called an undefined ``sigmoid``; this alias keeps the
# first definition reachable.
sigmoid = lowerCamelCase__


def lowerCamelCase__(_a):
    """Swish / SiLU activation: ``x * sigmoid(x)``.

    :param _a: scalar or numpy array.
    :return: swish of the input, same shape as the input.
    """
    return _a * sigmoid(_a)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import logging
import os
import threading
import time
# Optional dependencies: each falls back to None so the platform-specific
# lock classes below can pick an implementation at import time.
# NOTE(review): every fallback binds the same name `a__` instead of the
# module it failed to import (`warnings`/`msvcrt`/`fcntl`) — the original
# names appear mangled; confirm against py-filelock 3.0.12.
try:
import warnings
except ImportError:
a__ : str =None
try:
import msvcrt
except ImportError:
a__ : List[str] =None
try:
import fcntl
except ImportError:
a__ : Any =None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a__ : Dict =OSError
# Data
# ------------------------------------------------
# Public API list and package version.
a__ : str =[
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
a__ : Union[str, Any] ='''3.0.12'''
# Lazily-created module logger cache (originally named `_logger`).
a__ : Union[str, Any] =None
def lowercase__():
    """Return the module-level logger, creating and caching it on first use.

    Fixed: the original bound the new logger to a throwaway local and then
    returned the untouched (possibly undefined) global ``_logger``; it also
    carried a ``-> Tuple`` annotation with ``Tuple`` never imported, which
    would raise NameError at definition time.
    """
    global _logger
    # globals().get tolerates `_logger` never having been assigned yet.
    _logger = globals().get('_logger') or logging.getLogger(__name__)
    return _logger
# Timeout exception raised when a file lock cannot be acquired in time.
# NOTE(review): the base class `__lowerCamelCase` is undefined (originally
# TimeoutError/OSError), `lock_file` is not the parameter name (`__A` is),
# and `temp` is undefined — names appear mangled; confirm against
# py-filelock's Timeout class.
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : str ):
__UpperCamelCase = lock_file
return None
def __str__( self : Any ):
__UpperCamelCase = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
# Context-manager proxy returned by BaseFileLock.acquire(): releases the
# wrapped lock on __exit__ so `with lock.acquire(...):` works.
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] , __A : Union[str, Any] ):
# NOTE(review): binds a mangled local instead of (presumably) self.lock.
__UpperCamelCase = lock
return None
def __enter__( self : int ):
return self.lock
def __exit__( self : List[str] , __A : int , __A : Dict , __A : List[Any] ):
self.lock.release()
return None
return None
# Base class implementing the reentrant acquire/release protocol shared by
# all lock flavors; subclasses supply _acquire()/_release().
# NOTE(review): throughout this class, assignments of the form
# `__UpperCamelCase = ...` bind a mangled local rather than the instance
# attributes the rest of the class reads (self._lock_file,
# self._lock_file_fd, self._timeout, self._thread_lock, self._lock_counter)
# — confirm against py-filelock 3.0.12 before use.
class snake_case :
"""simple docstring"""
def __init__( self : Optional[int] , __A : Optional[Any] , __A : str=-1 , __A : Any=None ):
__UpperCamelCase = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
__UpperCamelCase = self.hash_filename_if_too_long(__A , __A )
# The path to the lock file.
__UpperCamelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__UpperCamelCase = None
# The default timeout value.
__UpperCamelCase = timeout
# We use this lock primarily for the lock counter.
__UpperCamelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__UpperCamelCase = 0
return None
@property
def _lowerCamelCase ( self : List[Any] ):
return self._lock_file
@property
def _lowerCamelCase ( self : Optional[int] ):
return self._timeout
@timeout.setter
def _lowerCamelCase ( self : Any , __A : Optional[Any] ):
__UpperCamelCase = float(__A )
return None
# Platform hooks implemented by subclasses.
def _lowerCamelCase ( self : Tuple ):
raise NotImplementedError()
def _lowerCamelCase ( self : int ):
raise NotImplementedError()
@property
def _lowerCamelCase ( self : Tuple ):
return self._lock_file_fd is not None
# acquire(): poll _acquire() until the lock is held or timeout elapses.
def _lowerCamelCase ( self : List[str] , __A : int=None , __A : str=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
__UpperCamelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__UpperCamelCase = id(self )
__UpperCamelCase = self._lock_file
__UpperCamelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(__A )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__UpperCamelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
# release(): decrement the nesting counter; only unlock at zero (or force).
def _lowerCamelCase ( self : str , __A : str=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__UpperCamelCase = id(self )
__UpperCamelCase = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
__UpperCamelCase = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : Dict ):
self.acquire()
return self
def __exit__( self : str , __A : Optional[int] , __A : List[Any] , __A : str ):
self.release()
return None
def __del__( self : Any ):
self.release(force=__A )
return None
# Replace an over-long lock filename with a truncated+hashed variant.
def _lowerCamelCase ( self : Any , __A : str , __A : int ):
__UpperCamelCase = os.path.basename(__A )
if len(__A ) > max_length and max_length > 0:
__UpperCamelCase = os.path.dirname(__A )
__UpperCamelCase = str(hash(__A ) )
__UpperCamelCase = filename[: max_length - len(__A ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(__A , __A )
else:
return path
# Windows lock: exclusive byte-range lock via msvcrt.locking().
# NOTE(review): base class `__lowerCamelCase` is undefined (originally
# BaseFileLock) and `__UpperCamelCase = ...` binds mangled locals instead
# of self._lock_file / self._lock_file_fd — confirm against py-filelock.
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __A : Optional[Any] , __A : Optional[Any]=-1 , __A : Dict=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__A , timeout=__A , max_filename_length=__A )
# \\?\ prefix lifts the MAX_PATH limit on Windows paths.
__UpperCamelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__UpperCamelCase = os.open(self._lock_file , __A )
except OSError:
pass
else:
try:
# Non-blocking exclusive lock on one byte; failure leaves fd closed.
msvcrt.locking(__A , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__A )
else:
__UpperCamelCase = fd
return None
def _lowerCamelCase ( self : Tuple ):
__UpperCamelCase = self._lock_file_fd
__UpperCamelCase = None
msvcrt.locking(__A , msvcrt.LK_UNLCK , 1 )
os.close(__A )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
# Unix lock: advisory exclusive lock via fcntl.flock().
# NOTE(review): base class `__lowerCamelCase` is undefined and the
# `__UpperCamelCase = ...` assignments bind mangled locals instead of
# self._lock_file_fd etc. — confirm against py-filelock.
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __A : List[str] , __A : Any=-1 , __A : Union[str, Any]=None ):
# Cap the lock filename at the filesystem's name length limit.
__UpperCamelCase = os.statvfs(os.path.dirname(__A ) ).f_namemax
super().__init__(__A , timeout=__A , max_filename_length=__A )
def _lowerCamelCase ( self : int ):
__UpperCamelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__UpperCamelCase = os.open(self._lock_file , __A )
try:
# Non-blocking exclusive lock; close the fd if somebody else holds it.
fcntl.flock(__A , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__A )
else:
__UpperCamelCase = fd
return None
def _lowerCamelCase ( self : Dict ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
__UpperCamelCase = self._lock_file_fd
__UpperCamelCase = None
fcntl.flock(__A , fcntl.LOCK_UN )
os.close(__A )
return None
# Portable fallback lock: existence of the lock file *is* the lock
# (O_EXCL makes creation atomic). No OS-level locking primitives used.
# NOTE(review): base class `__lowerCamelCase` is undefined and
# `__UpperCamelCase = fd` binds a mangled local — confirm against
# py-filelock's SoftFileLock.
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self : str ):
__UpperCamelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__UpperCamelCase = os.open(self._lock_file , __A )
except OSError:
pass
else:
__UpperCamelCase = fd
return None
def _lowerCamelCase ( self : str ):
os.close(self._lock_file_fd )
__UpperCamelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
# Pick the platform-appropriate FileLock implementation at import time.
# NOTE(review): `WindowsFileLock`/`UnixFileLock`/`SoftFileLock` are not
# defined above (all classes were mangled to `snake_case`), and every
# assignment rebinds the same name `a__` — confirm against py-filelock.
a__ : Optional[Any] =None
if msvcrt:
a__ : Any =WindowsFileLock
elif fcntl:
a__ : Union[str, Any] =UnixFileLock
else:
a__ : Dict =SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
'''simple docstring'''
def UpperCAmelCase(UpperCAmelCase__):
    """Return the minimum difference between the sums of two subsets that
    partition the input list (classic "minimum partition" DP).

    Fixed: the original bound every DP update to the same throwaway local
    instead of writing the table, so the table stayed all-False and the
    result name was undefined (NameError on return).

    :param UpperCAmelCase__: list of non-negative integers.
    :return: minimal |sum(subset1) - sum(subset2)| over all 2-partitions.
    """
    arr = UpperCAmelCase__
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    # A sum of 0 is always achievable (empty subset).
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Skip item i-1, or take it when it fits.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = 0
    # The achievable half-sum j closest to s/2 minimizes s - 2*j.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
| 449 |
'''simple docstring'''
import numpy as np
def UpperCAmelCase(UpperCAmelCase__: np.array):
    """Elementwise logistic sigmoid: ``1 / (1 + exp(-x))``.

    Fixed: the body referenced an undefined name ``vector`` instead of the
    function's parameter, raising NameError on any call.

    :param UpperCAmelCase__: scalar or numpy array.
    :return: sigmoid of the input, same shape as the input.
    """
    return 1 / (1 + np.exp(-UpperCAmelCase__))
# Run the module's doctests when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : List[Any] ) -> List[str]:
return price * (1 + tax_rate)
# Demo output when executed directly.
# NOTE(review): `price_plus_tax` is not defined above (the function is
# named `_lowerCAmelCase`) — confirm the intended public name.
if __name__ == "__main__":
print(f"""{price_plus_tax(1_00, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
from typing import Any
# Viterbi decoding for a hidden Markov model: returns the most likely
# state sequence for the given observation sequence.
# NOTE(review): the signature repeats one parameter name five times (a
# SyntaxError in Python) and every local binds the mangled name
# `UpperCamelCase__`, so names read later (`probabilities`, `pointers`,
# `observation`, ...) are undefined; `_validation` is also not defined in
# this file (all helpers share the name SCREAMING_SNAKE_CASE_). Confirm
# against TheAlgorithms' dynamic_programming/viterbi.py.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
# Creates data structures and fill initial step
UpperCamelCase__ : dict = {}
UpperCamelCase__ : dict = {}
for state in states_space:
UpperCamelCase__ : Optional[int] = observations_space[0]
UpperCamelCase__ : Any = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCamelCase__ : Union[str, Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(UpperCamelCase__ ) ):
UpperCamelCase__ : str = observations_space[o]
UpperCamelCase__ : Union[str, Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCamelCase__ : int = ''''''
UpperCamelCase__ : List[str] = -1
for k_state in states_space:
UpperCamelCase__ : Union[str, Any] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCamelCase__ : Tuple = probability
UpperCamelCase__ : Union[str, Any] = k_state
# Update probabilities and pointers dicts
UpperCamelCase__ : Tuple = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCamelCase__ : Optional[Any] = arg_max
# The final observation
UpperCamelCase__ : List[str] = observations_space[len(UpperCamelCase__ ) - 1]
# argmax for given final observation
UpperCamelCase__ : Dict = ''''''
UpperCamelCase__ : Tuple = -1
for k_state in states_space:
UpperCamelCase__ : Any = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCamelCase__ : List[str] = probability
UpperCamelCase__ : Tuple = k_state
UpperCamelCase__ : Any = arg_max
# Process pointers backwards
UpperCamelCase__ : List[Any] = last_state
UpperCamelCase__ : int = []
for o in range(len(UpperCamelCase__ ) - 1 , -1 , -1 ):
result.append(UpperCamelCase__ )
UpperCamelCase__ : int = pointers[previous, observations_space[o]]
result.reverse()
return result
# Top-level validation: non-empty args, list-typed spaces, dict-typed
# probability tables. NOTE(review): duplicate parameter names (SyntaxError)
# and the `_validate_*` helper names are not defined in this file — names
# appear mangled.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validate_not_empty(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
_validate_lists(UpperCamelCase__ , UpperCamelCase__ )
_validate_dicts(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Raise if any argument is empty/falsy.
# NOTE(review): duplicate parameter names (SyntaxError) and the body reads
# the original (mangled-away) argument names — confirm against upstream.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
# Validate both spaces are lists of strings.
# NOTE(review): duplicate parameter names (SyntaxError) — names mangled.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
_validate_list(UpperCamelCase__ , '''observations_space''' )
_validate_list(UpperCamelCase__ , '''states_space''' )
# Validate that `_object` is a list of strings, naming the offending
# variable in the error. NOTE(review): first parameter was presumably
# `_object` — the body reads `_object` but the signature binds the
# mangled name.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
if not isinstance(_object , UpperCamelCase__ ):
UpperCamelCase__ : List[Any] = f'''{var_name} must be a list'''
raise ValueError(UpperCamelCase__ )
else:
for x in _object:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : List[Any] = f'''{var_name} must be a list of strings'''
raise ValueError(UpperCamelCase__ )
# Validate the three probability tables: flat dict for the initial
# distribution, nested dicts for transition/emission probabilities.
# NOTE(review): duplicate parameter names (SyntaxError) — names mangled.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
_validate_dict(UpperCamelCase__ , '''initial_probabilities''' , UpperCamelCase__ )
_validate_nested_dict(UpperCamelCase__ , '''transition_probabilities''' )
_validate_nested_dict(UpperCamelCase__ , '''emission_probabilities''' )
# Validate a dict-of-dicts: outer dict plus each inner value dict.
# NOTE(review): `_object` is read but the signature binds a mangled name.
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
_validate_dict(_object , UpperCamelCase__ , UpperCamelCase__ )
for x in _object.values():
_validate_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Validate a dict with string keys and values of the given type; the
# `nested` flag only changes the error message wording.
# NOTE(review): `_object`/`var_name`/`value_type`/`nested` are read but the
# signature binds the mangled name repeatedly (SyntaxError).
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ):
if not isinstance(_object , UpperCamelCase__ ):
UpperCamelCase__ : List[str] = f'''{var_name} must be a dict'''
raise ValueError(UpperCamelCase__ )
if not all(isinstance(UpperCamelCase__ , UpperCamelCase__ ) for x in _object ):
UpperCamelCase__ : Dict = f'''{var_name} all keys must be strings'''
raise ValueError(UpperCamelCase__ )
if not all(isinstance(UpperCamelCase__ , UpperCamelCase__ ) for x in _object.values() ):
UpperCamelCase__ : Optional[Any] = '''nested dictionary ''' if nested else ''''''
UpperCamelCase__ : Optional[Any] = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(UpperCamelCase__ )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import machinery for the Transfo-XL subpackage: declare the import
# structure, then either import eagerly for type checkers or install a
# _LazyModule that defers the heavy torch/TF imports until first access.
__SCREAMING_SNAKE_CASE : Tuple = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
# PyTorch model symbols, only exported when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[str] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
# TensorFlow model symbols, only exported when TF is installed.
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
# Static analyzers / type checkers get real imports; runtime gets the
# lazy module below instead.
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
# Replace this module with a lazy proxy that imports submodules on demand.
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Export Kandinsky pipelines only when both transformers and torch are
# installed; otherwise fall back to dummy placeholder objects that raise
# a helpful error on use.
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """Builds tiny ViT-hybrid configs and dummy inputs for the tests below.

    NOTE(review): the obfuscated original named this class ``lowercase`` and
    collapsed every method name to ``lowercase_`` (so later defs shadowed
    earlier ones) and every ``__init__`` parameter to one duplicated name (a
    SyntaxError). Names are restored from the attribute assignments and from
    the call sites in the test class (``ViTHybridModelTester(self)``,
    ``prepare_config_and_inputs`` ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # Avoid the original's shared mutable default argument.
        self.backbone_featmap_shape = [1, 16, 4, 4] if backbone_featmap_shape is None else backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels is None unless ``use_labels``."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ``ViTHybridConfig`` with a bottleneck BiT backbone."""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        # The obfuscated original passed an undefined name for both
        # ``is_decoder`` and ``backbone_config`` (NameError at runtime).
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the bare model; checks the hidden-state shape."""
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward pass through the classification head; checks the logits shape."""
        num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( __UpperCamelCase , unittest.TestCase ):
    """Common model tests for ViT hybrid.

    NOTE(review): the obfuscated original listed ``__UpperCamelCase`` twice as
    a base (duplicate base class -> TypeError) — upstream the two mixins are
    ModelTesterMixin and PipelineTesterMixin; confirm what ``__UpperCamelCase``
    is bound to earlier in the file. It also bound five different class
    attributes to the single name ``__a`` and every test method to
    ``lowercase_``, so only the last of each survived; the ModelTesterMixin /
    unittest API names are restored below.
    """

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        # The original passed an undefined name as config_class; ViTHybridConfig
        # is the class under test (imported at the top of the file).
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone. Initialize to [] so the loop
            # below is safe even if no patch-embedding module is found.
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # The original passed an undefined name to from_pretrained.
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO "two cats" fixture image used by the integration tests.

    The obfuscated original named this ``_a``, but both integration tests call
    ``prepare_img()``; the original name is kept as an alias.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")


_a = prepare_img
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    """Slow integration tests against the released google/vit-hybrid checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Must use this exact name: the test below reads self.default_image_processor.
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        # NOTE(review): the obfuscated original used an undefined name for the
        # device; upstream uses `torch_device` from transformers.testing_utils
        # — confirm it is imported earlier in this file.
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9_090, -0.4_993, -0.2_389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        # Original said `assertTrue(model.config.idalabel[...], "tabby, tabby cat")`:
        # `idalabel` is the obfuscation of `id2label`, and assertTrue with two
        # args treats the second as a *message*, so it could never fail.
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
import logging
from transformers.configuration_utils import PretrainedConfig
A__ : Tuple = logging.getLogger(__name__)
class lowercase ( __UpperCamelCase ):
    """Configuration for a Masked BERT model (BERT with movement-pruning masks).

    Mirrors the standard BERT configuration and adds the pruning-specific
    fields ``pruning_method``, ``mask_init`` and ``mask_scale``.

    NOTE(review): the base class ``__UpperCamelCase`` is presumably
    ``PretrainedConfig`` (imported above) — confirm. The obfuscated original
    stored the model type in a class attribute literally named ``__a``, which
    Python name-mangles; the PretrainedConfig API name is restored.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        # The obfuscated signature reused one parameter name 16 times — a
        # SyntaxError. Names are restored from the attribute assignments and
        # the default values (which match BERT-base).
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 233 | 1 |
# `sklearn.metrics` has no `fa_score` — the obfuscation mangled `f1_score`
# (ImportError at import time). Alias it so existing references keep working.
from sklearn.metrics import f1_score as fa_score

import datasets
_SCREAMING_SNAKE_CASE : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_SCREAMING_SNAKE_CASE : Any = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
# NOTE(review): the module's description / kwargs-description / citation
# strings above were all bound to the single name `_SCREAMING_SNAKE_CASE`
# (only the last binding — the citation — survives). The decorator and
# `_info` originally used _DESCRIPTION, _KWARGS_DESCRIPTION and _CITATION;
# restore those three distinct module-level names to recover the real docs.
@datasets.utils.file_utils.add_start_docstrings(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE)
class UpperCamelCase__ ( datasets.Metric ):
    """F1 metric (harmonic mean of precision and recall), backed by scikit-learn."""

    def _info(self):
        # `datasets.Metric` requires this exact method name; the obfuscated
        # original named both methods `__snake_case`, so this one was shadowed.
        return datasets.MetricInfo(
            description=_SCREAMING_SNAKE_CASE,
            citation=_SCREAMING_SNAKE_CASE,
            inputs_description=_SCREAMING_SNAKE_CASE,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        # `Metric.compute` forwards keyword arguments to `_compute`, so these
        # parameter names must match the documented metric arguments. The
        # obfuscated signature reused one name for every parameter (SyntaxError).
        # sklearn's signature is f1_score(y_true, y_pred, ...): references first.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from numpy import exp, pi, sqrt


def SCREAMING_SNAKE_CASE(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the Gaussian (normal) probability density at ``x``.

    Args:
        x: Point at which to evaluate the density.
        mu: Mean of the distribution. Defaults to 0.0.
        sigma: Standard deviation; must be non-zero. Defaults to 1.0.

    Returns:
        ``1 / sqrt(2*pi*sigma**2) * exp(-(x - mu)**2 / (2*sigma**2))``.

    The obfuscated original declared the same parameter name three times
    (a SyntaxError) and annotated the input as ``Dict`` and the return as
    ``int`` — both wrong for a float density.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : dict ):
'''simple docstring'''
snake_case_ : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
snake_case_ : set[int] = set()
return any(
node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for node in graph )
def __lowerCAmelCase ( __UpperCamelCase : dict , __UpperCamelCase : int , __UpperCamelCase : set , __UpperCamelCase : set ):
'''simple docstring'''
visited.add(__UpperCamelCase )
rec_stk.add(__UpperCamelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__UpperCamelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( ProcessorMixin ):
    """Processor bundling a Blip image processor, a language tokenizer and a
    Q-Former tokenizer (InstructBLIP-style).

    NOTE(review): the obfuscated original inherited from an undefined name;
    ``ProcessorMixin`` (imported above) is the evident base. Its three class
    attributes were all bound to one name, and every parameter of ``__call__``
    etc. shared one duplicated name (a SyntaxError); the ProcessorMixin API
    names are restored below.
    """

    # ProcessorMixin contract: these exact attribute names are required.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        """Tokenize ``text`` with both tokenizers and preprocess ``images``.

        Returns a BatchFeature holding the language-model encoding, the
        Q-Former encoding (under ``qformer_input_ids`` /
        ``qformer_attention_mask``) and the pixel values.
        """
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            # The obfuscated original popped these into discarded locals; they
            # must be stored on the encoding under the qformer_* keys.
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the language tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the language tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        """Save the processor, writing the Q-Former tokenizer to a subfolder."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor plus the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
# The obfuscated original bound BOTH constants to one name, leaving
# LOOKUP_LETTERS (referenced below) undefined; the original names are restored.
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def _UpperCamelCase(spanish_id: str) -> bool:
    """Validate a Spanish DNI/NIF identifier (8 digits plus a check letter).

    Args:
        spanish_id: Identifier such as ``"12345678Z"`` or ``"12345678-Z"``.

    Returns:
        True when the trailing letter matches the mod-23 checksum of the
        8-digit number.

    Raises:
        TypeError: if ``spanish_id`` is not a string.
        ValueError: if it is not 8 digits followed by a letter.
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    # The obfuscated original measured the *raw* string here, so a valid
    # dashed id like "12345678-Z" (length 10) was rejected.
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


# Public alias (`_UpperCamelCase` is skipped by `from module import *`).
is_spanish_id = _UpperCamelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
class _lowercase:
    """Prefix-sum index over a list of integers.

    The obfuscated original named both methods ``__magic_name__``, so the
    range-sum accessor was shadowed by the subarray-sum check; the two
    distinct method names are restored.
    """

    def __init__(self, array: list[int]) -> None:
        """Precompute prefix sums: ``prefix_sum[i] == sum(array[: i + 1])``."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of ``array[start .. end]`` (both inclusive)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


# Public alias (`_lowercase` is skipped by `from module import *`).
PrefixSum = _lowercase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# The `sys.path.append` below (and `shutil.copy` in the tests) reference this
# name; the obfuscated original only ever bound `__A`, leaving it undefined.
git_repo_path = __A
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# NOTE(review): the rendering stripped leading whitespace; the internal
# indentation below follows the upstream fixture — confirm against
# modeling_bert.py, since find_code_in_transformers compares text exactly.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""
# Keep the obfuscated alias bound to the same text.
__A = REFERENCE_CODE
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for the `check_copies` utility (consistency of `# Copied from` blocks).

    NOTE(review): the obfuscated original collapsed setUp/tearDown/helpers and
    every test onto the single name ``snake_case`` (later defs shadowed
    earlier ones) and, in setUp, used ``self.transformer_dir`` before
    assigning it. Names and assignment order are restored from the upstream
    test — confirm against utils/tests of the pinned transformers revision.
    """

    def setUp(self):
        """Create a scratch source tree and point check_copies at it."""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        # Repoint check_copies so is_copy_consistent resolves files in the
        # scratch tree (upstream mutates TRANSFORMER_PATH — confirm).
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        """Restore the real source path and delete the scratch tree."""
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write a formatted candidate class and assert is_copy_consistent agrees."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the obfuscated source shows `black.TargetVersion.PYaa`
        # (invalid); PY37 matches the upstream test — confirm against the
        # pinned black version.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # NOTE(review): assertTrue(x, msg) only checks the file is
                # non-empty; upstream has the same weak assertion. Consider
                # assertEqual(f.read(), expected) once formatting is accounted for.
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        localized_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        converted_md_list_sample = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        link_unchanged_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        converted_md_list_sample = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL only when vision support is installed; otherwise define a no-op
# stand-in so this test module can still be imported (vision tests are
# gated/skipped elsewhere).
if is_vision_available():
    from PIL import Image
else:
    class lowercase__ :
        '''Fallback stub used when PIL/vision support is unavailable.'''
        @staticmethod
        def lowerCamelCase_ ( *snake_case , **snake_case ) -> str:
            # Intentionally a no-op: nothing vision-related runs in this mode.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase__ ( unittest.TestCase ):
    '''Pipeline tests for ObjectDetectionPipeline (torch backend only).

    NOTE(review): an automated rename mangled the local variable names — many
    bodies assign to ``_UpperCAmelCase`` but later read ``object_detector``,
    ``outputs``, ``batch_outputs`` etc., which are undefined as written. The
    original variable names must be restored before these tests can run.
    '''
    # Model mapping handed to the shared pipeline test harness.
    _UpperCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]:
        # Build the pipeline under test plus example inputs for the harness.
        _UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , image_processor=snake_case )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def lowerCamelCase_ ( self , snake_case , snake_case ) -> List[Any]:
        # threshold=0.0 returns every candidate box; check each detection has
        # the expected {score, label, box} structure.
        _UpperCAmelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
        self.assertGreater(len(snake_case ) , 0 )
        for detected_object in outputs:
            self.assertEqual(
                snake_case , {
                    'score': ANY(snake_case ),
                    'label': ANY(snake_case ),
                    'box': {'xmin': ANY(snake_case ), 'ymin': ANY(snake_case ), 'xmax': ANY(snake_case ), 'ymax': ANY(snake_case )},
                } , )
        # Batched inputs across several image sources and color modes.
        import datasets
        _UpperCAmelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        _UpperCAmelCase = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        _UpperCAmelCase = object_detector(snake_case , threshold=0.0 )
        self.assertEqual(len(snake_case ) , len(snake_case ) )
        for outputs in batch_outputs:
            self.assertGreater(len(snake_case ) , 0 )
            for detected_object in outputs:
                self.assertEqual(
                    snake_case , {
                        'score': ANY(snake_case ),
                        'label': ANY(snake_case ),
                        'box': {'xmin': ANY(snake_case ), 'ymin': ANY(snake_case ), 'xmax': ANY(snake_case ), 'ymax': ANY(snake_case )},
                    } , )
    @require_tf
    @unittest.skip('Object detection not implemented in TF' )
    def lowerCamelCase_ ( self ) -> List[Any]:
        pass
    @require_torch
    def lowerCamelCase_ ( self ) -> int:
        # Tiny random DETR checkpoint: fast smoke test of single + batched calls.
        _UpperCAmelCase = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        _UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(snake_case )
        _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(snake_case )
        _UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , feature_extractor=snake_case )
        _UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
            ] , )
        _UpperCAmelCase = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] , threshold=0.0 , )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
            ] , )
    @require_torch
    @slow
    def lowerCamelCase_ ( self ) -> Optional[Any]:
        # Real DETR ResNet-50 checkpoint: pin exact detections (default threshold).
        _UpperCAmelCase = 'facebook/detr-resnet-50'
        _UpperCAmelCase = AutoModelForObjectDetection.from_pretrained(snake_case )
        _UpperCAmelCase = AutoFeatureExtractor.from_pretrained(snake_case )
        _UpperCAmelCase = ObjectDetectionPipeline(model=snake_case , feature_extractor=snake_case )
        _UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ] , )
        _UpperCAmelCase = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ] , )
    @require_torch
    @slow
    def lowerCamelCase_ ( self ) -> List[str]:
        # Same checkpoint, constructed via the pipeline() factory instead.
        _UpperCAmelCase = 'facebook/detr-resnet-50'
        _UpperCAmelCase = pipeline('object-detection' , model=snake_case )
        _UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ] , )
        _UpperCAmelCase = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ] , )
    @require_torch
    @slow
    def lowerCamelCase_ ( self ) -> Tuple:
        # A high threshold should keep only the two confident 'cat' detections.
        _UpperCAmelCase = 0.9985
        _UpperCAmelCase = 'facebook/detr-resnet-50'
        _UpperCAmelCase = pipeline('object-detection' , model=snake_case )
        _UpperCAmelCase = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=snake_case )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ] , )
    @require_torch
    @require_pytesseract
    @slow
    def lowerCamelCase_ ( self ) -> List[str]:
        # LayoutLMv3 document-detection checkpoint with a pipeline-level threshold.
        _UpperCAmelCase = 'Narsil/layoutlmv3-finetuned-funsd'
        _UpperCAmelCase = 0.9993
        _UpperCAmelCase = pipeline('object-detection' , model=snake_case , threshold=snake_case )
        _UpperCAmelCase = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
        self.assertEqual(
            nested_simplify(snake_case , decimals=4 ) , [
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
            ] , )
| 573 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
    # Scheduler step output container.
    # NOTE(review): an automated rename collapsed both fields onto the same
    # identifier (the second silently shadows the first) and the base class
    # name is unresolved — originally these look like two distinct tensor
    # fields (e.g. prev_sample / prev_sample_mean, per SdeVeOutput usage
    # below); restore distinct names. TODO confirm against upstream.
    snake_case = 42
    snake_case = 42
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    '''Variance-exploding (VE) SDE scheduler in the score_sde_pytorch style.

    NOTE(review): an automated rename broke this class — several signatures
    repeat the parameter name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError in
    Python), and bodies assign to ``lowerCamelCase__`` while reading the
    original names (``sigma_max``, ``timesteps``, ``sample`` ...). The
    original parameter/attribute names must be restored before use.
    '''
    # Scheduler order (derivative evaluations per step).
    snake_case = 1
    @register_to_config
    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int = 2000 , SCREAMING_SNAKE_CASE_ : float = 0.1_5 , SCREAMING_SNAKE_CASE_ : float = 0.0_1 , SCREAMING_SNAKE_CASE_ : float = 1_3_4_8.0 , SCREAMING_SNAKE_CASE_ : float = 1e-5 , SCREAMING_SNAKE_CASE_ : int = 1 , ):
        # standard deviation of the initial noise distribution
        lowerCamelCase__ = sigma_max
        # setable values
        lowerCamelCase__ = None
        self.set_sigmas(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def __UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ):
        # Identity: this scheduler requires no input scaling.
        return sample
    def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ):
        # Build the continuous timestep grid from 1 down to sampling_eps.
        lowerCamelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase__ = torch.linspace(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
    def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : float = None ):
        # Geometric sigma schedule between sigma_min and sigma_max.
        lowerCamelCase__ = sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase__ = sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCamelCase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase__ = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE_ ) , math.log(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
        lowerCamelCase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # Sigma of the previous discrete timestep (zeros at t == 0).
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE_ : bool = True , ):
        # Predictor step (reverse SDE Euler-Maruyama).
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        lowerCamelCase__ = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase__ = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase__ = timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase__ = self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase__ = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(sample.device )
        lowerCamelCase__ = torch.zeros_like(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase__ = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase__ = diffusion.unsqueeze(-1 )
        lowerCamelCase__ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase__ = randn_tensor(
            sample.shape , layout=sample.layout , generator=SCREAMING_SNAKE_CASE_ , device=sample.device , dtype=sample.dtype )
        lowerCamelCase__ = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase__ = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE_ , prev_sample_mean=SCREAMING_SNAKE_CASE_ )
    def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE_ : bool = True , ):
        # Corrector step (Langevin dynamics with an SNR-derived step size).
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase__ = randn_tensor(sample.shape , layout=sample.layout , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        lowerCamelCase__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        lowerCamelCase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase__ = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase__ = step_size.unsqueeze(-1 )
        lowerCamelCase__ = sample + step_size * model_output
        lowerCamelCase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
    def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , ):
        # Forward-noise original samples at the sigmas of the given timesteps.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        lowerCamelCase__ = timesteps.to(original_samples.device )
        lowerCamelCase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
        lowerCamelCase__ = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(SCREAMING_SNAKE_CASE_ ) * sigmas[:, None, None, None]
        )
        lowerCamelCase__ = noise + original_samples
        return noisy_samples
    def __len__( self : Union[str, Any] ):
        # Length == configured number of training timesteps.
        return self.config.num_train_timesteps
| 258 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ : # Public class to implement a graph
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
lowerCamelCase__ = row
lowerCamelCase__ = col
lowerCamelCase__ = graph
def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[list[bool]] ):
# Checking all 8 elements surrounding nth element
lowerCamelCase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowerCamelCase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowerCamelCase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : int ): # And finally, count all islands.
lowerCamelCase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowerCamelCase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += 1
return count
| 258 | 1 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs keyed by model size. The second-to-
# last URL path segment is the file's SHA-256, which the download helper below
# reads and verifies after fetching.
_UpperCamelCase : Tuple = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
# NOTE(review): the conversion entry point refers to this as ``_MODELS``; the
# automated rename bound it to ``_UpperCamelCase`` — restore the name.
def a_ ( _lowerCAmelCase : dict ):
    """Drop top-level bookkeeping entries from a Whisper state dict, in place.

    Args:
        _lowerCAmelCase: the ``state_dict`` to prune (mutated in place).
    """
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        # BUG FIX: pop each ignored key from the dict passed in, with a None
        # default so absent keys don't raise; the previous revision called
        # ``state_dict.pop`` on an undefined name with the wrong arguments.
        _lowerCAmelCase.pop(k , None )
# Substring-replacement map from original Whisper state-dict key fragments to
# their HF Transformers equivalents; applied key-by-key by the rename helper.
# NOTE(review): that helper reads this as ``WHISPER_MAPPING``, but the
# automated rename bound it to ``_UpperCamelCase`` — restore the name.
_UpperCamelCase : str = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def a_ ( _lowerCAmelCase : dict ):
    """Rename every key of the given state dict via ``WHISPER_MAPPING``.

    Each mapping entry whose fragment occurs in a key is substring-replaced.
    The dict is mutated in place and also returned.

    NOTE(review): relies on a module-level ``WHISPER_MAPPING``; in this file
    the mapping dict appears to be bound to ``_UpperCamelCase`` instead —
    that binding must be restored for this to run.
    """
    s_dict = _lowerCAmelCase
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        # BUG FIX: re-insert the popped value under its renamed key; the
        # previous revision popped the value into a throwaway local, silently
        # deleting every entry instead of renaming it.
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[int] = emb.weight.shape
lowercase__ : Optional[Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
lowercase__ : Union[str, Any] = emb.weight.data
return lin_layer
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
    '''Download a checkpoint URL into a root directory, verifying the SHA-256
    checksum embedded in the URL path; return the file bytes.

    NOTE(review): broken by an automated rename — the signature repeats
    ``_lowerCAmelCase`` (a SyntaxError in Python), the body reads names that
    no longer exist (``url``, ``download_target``, ``expected_shaaaa``,
    ``buffer``, ``model_bytes``), and ``hashlib.shaaaa`` should be
    ``hashlib.sha256``. Restore the original names before use.
    '''
    os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    lowercase__ : Dict = os.path.basename(_lowerCAmelCase )
    # Expected SHA-256 is the second-to-last URL path segment.
    lowercase__ : List[str] = url.split('/' )[-2]
    lowercase__ : int = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
    if os.path.exists(_lowerCAmelCase ) and not os.path.isfile(_lowerCAmelCase ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(_lowerCAmelCase ):
        # Reuse a previously downloaded file when its checksum still matches.
        lowercase__ : List[Any] = open(_lowerCAmelCase , 'rb' ).read()
        if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    # Stream the download in 8 KiB chunks with a tqdm progress bar.
    with urllib.request.urlopen(_lowerCAmelCase ) as source, open(_lowerCAmelCase , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_lowerCAmelCase , unit_divisor=1024 ) as loop:
            while True:
                lowercase__ : Any = source.read(8192 )
                if not buffer:
                    break
                output.write(_lowerCAmelCase )
                loop.update(len(_lowerCAmelCase ) )
    # Verify the freshly downloaded bytes before returning them.
    lowercase__ : Optional[Any] = open(_lowerCAmelCase , 'rb' ).read()
    if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' )
    return model_bytes
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
    '''Convert an OpenAI Whisper checkpoint (name or local .pt path) into a
    HF ``WhisperForConditionalGeneration`` saved at the given output folder.

    NOTE(review): broken by an automated rename — the signature repeats
    ``_lowerCAmelCase`` (a SyntaxError), and the body reads names that are
    never bound (``checkpoint_path``, ``original_checkpoint``, ``state_dict``,
    ``dimensions``, ``missing``, ``tie_embeds``, ``proj_out_weights``, ...).
    Restore the original names before use.
    '''
    if ".pt" not in checkpoint_path:
        # Bare size name ("tiny", "base", ...): fetch from the _MODELS table.
        lowercase__ : Any = _download(_MODELS[checkpoint_path] )
    else:
        lowercase__ : Optional[Any] = torch.load(_lowerCAmelCase , map_location='cpu' )
    lowercase__ : Optional[int] = original_checkpoint['dims']
    lowercase__ : int = original_checkpoint['model_state_dict']
    # Keep the token embedding to (re)build the tied projection head below.
    lowercase__ : Union[str, Any] = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(_lowerCAmelCase )
    rename_keys(_lowerCAmelCase )
    lowercase__ : str = True
    lowercase__ : str = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    lowercase__ : Optional[Any] = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_lowerCAmelCase , decoder_ffn_dim=_lowerCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , )
    lowercase__ : Dict = WhisperForConditionalGeneration(_lowerCAmelCase )
    lowercase__ , lowercase__ : Union[str, Any] = model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
    # Only the positional-embedding buffers are allowed to be missing.
    if len(_lowerCAmelCase ) > 0 and not set(_lowerCAmelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        lowercase__ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        lowercase__ : Tuple = proj_out_weights
    model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
    # NOTE(review): mangled names — the parser is bound to ``_UpperCamelCase``
    # but read as ``parser``/``args``, and the entry point is defined above as
    # ``a_`` rather than ``convert_openai_whisper_to_tfms``; restore the
    # original bindings before running this script.
    _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    _UpperCamelCase : Dict = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 599 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic kernels so image outputs are reproducible across runs.
enable_full_determinism()
class UpperCAmelCase_ ( _a , unittest.TestCase):
    '''Fast DiTPipeline tests using tiny random components on CPU.

    NOTE(review): an automated rename collapsed all harness class attributes
    onto ``lowerCamelCase__`` (each assignment shadows the previous one) and
    method locals onto ``lowercase__``; the original attribute/variable names
    must be restored for the harness to see them.
    '''
    lowerCamelCase__ : Optional[Any] = DiTPipeline
    lowerCamelCase__ : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    lowerCamelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    lowerCamelCase__ : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    lowerCamelCase__ : Optional[Any] = False
    def _UpperCAmelCase ( self ) -> Any:
        # Build tiny, seeded components so tests are fast and deterministic.
        torch.manual_seed(0 )
        lowercase__ : Dict = TransformeraDModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=a , )
        lowercase__ : int = AutoencoderKL()
        lowercase__ : Dict = DDIMScheduler()
        lowercase__ : List[str] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def _UpperCAmelCase ( self , a , a=0 ) -> Dict:
        # Deterministic inputs for a single class-conditioned generation.
        if str(a ).startswith('mps' ):
            lowercase__ : Union[str, Any] = torch.manual_seed(a )
        else:
            lowercase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
        lowercase__ : Optional[int] = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def _UpperCAmelCase ( self ) -> Tuple:
        # Pin an exact 16x16 output slice from the tiny pipeline on CPU.
        lowercase__ : str = 'cpu'
        lowercase__ : Any = self.get_dummy_components()
        lowercase__ : List[Any] = self.pipeline_class(**a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ : str = self.get_dummy_inputs(a )
        lowercase__ : List[str] = pipe(**a ).images
        lowercase__ : int = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
        lowercase__ : Any = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
        lowercase__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a , 1e-3 )
    def _UpperCAmelCase ( self ) -> Any:
        # Batched and single inference should agree within tolerance.
        self._test_inference_batch_single_identical(relax_max_difference=a , expected_max_diff=1e-3 )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def _UpperCAmelCase ( self ) -> int:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class UpperCAmelCase_ ( unittest.TestCase):
    '''Slow integration tests for pretrained DiT checkpoints on GPU.

    NOTE(review): locals are mangled (``lowercase__`` assigned, original
    names like ``pipe``/``words``/``images`` read) — restore before running.
    '''
    def _UpperCAmelCase ( self ) -> Any:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self ) -> Dict:
        # 256px checkpoint: compare against reference renders per class label.
        lowercase__ : Optional[int] = torch.manual_seed(0 )
        lowercase__ : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )
        lowercase__ : List[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
        lowercase__ : Optional[int] = pipe.get_label_ids(a )
        lowercase__ : Optional[int] = pipe(a , generator=a , num_inference_steps=4_0 , output_type='np' ).images
        for word, image in zip(a , a ):
            lowercase__ : Tuple = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
            assert np.abs((expected_image - image).max() ) < 1e-2
    def _UpperCAmelCase ( self ) -> Dict:
        # 512px checkpoint with a DPM-Solver scheduler swapped in.
        lowercase__ : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )
        lowercase__ : Tuple = ['vase', 'umbrella']
        lowercase__ : List[str] = pipe.get_label_ids(a )
        lowercase__ : Optional[Any] = torch.manual_seed(0 )
        lowercase__ : Optional[Any] = pipe(a , generator=a , num_inference_steps=2_5 , output_type='np' ).images
        for word, image in zip(a , a ):
            lowercase__ : int = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 599 | 1 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : int = 4 ) -> list[list[int]]:
    """Return a row_size x row_size matrix filled with 1..row_size**2, row-major.

    Negative sizes are folded to their absolute value; 0 falls back to the
    default size 4.
    """
    # BUG FIX: the computed size was bound to a throwaway name while the
    # comprehension read an undefined ``row_size``.
    row_size = abs(UpperCamelCase ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]


# Restore the public name the demo block at the bottom of this module calls
# (the automated rename collapsed every helper onto one identifier).
make_matrix = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise (transpose, then reverse
    the row order).

    NOTE(review): requires module-level ``transpose``/``reverse_row``
    bindings; verify they exist — the automated rename collapsed every helper
    in this file onto ``__SCREAMING_SNAKE_CASE``.
    """
    return reverse_row(transpose(UpperCamelCase ) )
    # OR.. transpose(reverse_column(matrix))
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 180 degrees (reverse each row, then the row order).

    NOTE(review): requires module-level ``reverse_row``/``reverse_column``
    bindings; verify they exist — the automated rename collapsed every helper
    in this file onto ``__SCREAMING_SNAKE_CASE``.
    """
    return reverse_row(reverse_column(UpperCamelCase ) )
    # OR.. reverse_column(reverse_row(matrix))
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (transpose, then reverse
    each row).

    NOTE(review): requires module-level ``transpose``/``reverse_column``
    bindings; verify they exist — the automated rename collapsed every helper
    in this file onto ``__SCREAMING_SNAKE_CASE``.
    """
    return reverse_column(transpose(UpperCamelCase ) )
    # OR.. transpose(reverse_row(matrix))
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Return the transpose of the matrix as a new list of row lists."""
    # BUG FIX: each zipped column tuple must be listified (was wrapping the
    # whole input matrix) and the result returned (was returning an undefined
    # name).
    return [list(column ) for column in zip(*UpperCamelCase )]


# Restore the public name the rotation helpers in this module call.
transpose = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Return the matrix with its row order reversed (top <-> bottom)."""
    # BUG FIX: the reversed copy was bound to a throwaway name while an
    # undefined ``matrix`` was returned.
    return UpperCamelCase[::-1]


# Restore the public name the rotation helpers in this module call.
reverse_row = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> list[list[int]]:
    """Return the matrix with each row reversed (left <-> right)."""
    # BUG FIX: the reversed rows were bound to a throwaway name while an
    # undefined ``matrix`` was returned.
    return [row[::-1] for row in UpperCamelCase]


# Restore the public name the rotation helpers in this module call.
reverse_column = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : list[list[int]] ) -> None:
    """Print each row of the matrix on its own line, space-separated."""
    # BUG FIX: iterate the parameter, not an undefined ``matrix`` name.
    for row in UpperCamelCase:
        print(*row )


# Restore the public name the demo block at the bottom of this module calls.
print_matrix = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # NOTE(review): broken by an automated rename — the matrices are bound to
    # ``_A`` but read as ``matrix``, and both the 180- and 270-degree demos
    # call the same ``rotate_aaa`` name (originally distinct rotate_180 /
    # rotate_270 helpers, themselves now mangled above). Restore the original
    # bindings before running this demo.
    _A = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_aa(matrix))
    _A = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_aaa(matrix))
    _A = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix)) | 403 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __SCREAMING_SNAKE_CASE ( flax_key_tuple , flax_tensor ):
    """Map one flax parameter (key tuple, tensor) onto its PyTorch layout.

    - 3-D "kernel" (expert layer): rename last key part to "weight" and
      permute axes (0, 2, 1).
    - Other "kernel" (linear layer): rename to "weight" and transpose.
    - "scale" / "embedding": rename to "weight"; tensor unchanged.

    Returns the (possibly renamed) key tuple and the converted tensor.
    BUG FIX: the previous revision declared two parameters with the same name
    (a SyntaxError) and mixed mangled locals with the original names.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


# Restore an importable public name (the def above is name-mangled).
rename_base_flax_keys = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ) -> Tuple:
    """Resolve one flattened tensorstore key into (real layer name, split key, content).

    NOTE(review): non-functional as written — the signature repeats the
    parameter name ``UpperCamelCase`` (a SyntaxError), and the body reads names
    that are never bound (``layer``, ``split_layer``, ``checkpoint_info``,
    ``switch_checkpoint_path``) because every assignment targets the throwaway
    ``a_``.  The intended contract appears to be
    ``(layer, checkpoint_info, switch_checkpoint_path) ->
    (curr_real_layer_name, split_layer, content)`` — restore distinct names
    before use.
    """
    if "metadata" in layer:
        # Keys ending in ".../metadata": strip the suffix to get the layer name.
        a_ = layer.split("""metadata""" )
        a_ = """""".join(split_layer[0] )[:-1]
        a_ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        # Same treatment for the kvstore subtree.
        a_ = layer.split("""kvstore""" )
        a_ = """""".join(split_layer[0] )[:-1]
        a_ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        a_ = layer.split("""/""" )
        a_ = """/""".join(split_layer[:-1] )
        a_ = (split_layer[-1],)
    if "kvstore/path" in layer:
        # Rewrite the stored path so it points inside the checkpoint directory.
        a_ = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        a_ = """file"""
    else:
        a_ = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : str , UpperCamelCase : Any ) -> List[str]:
    """Rename a shard's keys via ``rename_keys`` and persist it with ``torch.save``.

    NOTE(review): non-functional as written — duplicate parameter name
    (SyntaxError), and the loop reads ``current_block`` / ``new_current_block``
    which are never bound because every assignment targets ``a_``.  Intended
    contract appears to be ``(current_block, save_path) -> None``.
    """
    a_ = rename_keys(UpperCamelCase )
    a_ = {}
    for k, v in current_block.items():
        a_ = v
    a_ = new_current_block
    torch.save(UpperCamelCase , UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : str = WEIGHTS_NAME ) -> Any:
    """Shard a T5X/tensorstore Switch-Transformers checkpoint into PyTorch ``.bin`` files.

    NOTE(review): mangled beyond runnability — five parameters share one name
    (a SyntaxError) and the body reads dozens of names
    (``switch_checkpoint_path``, ``max_shard_size``, ``weights_name``,
    ``sharded_state_dicts``, ``current_block``, ``current_block_size``,
    ``total_size``, ``checkpoint_info``, ``all_layers``, ``raw_weights``,
    ``weight_size``, ``shard_file``, ``metadata``, ``index``…) that are never
    bound because every assignment targets the throwaway ``a_``.  Upstream this
    is ``shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size,
    dtype, weights_name=WEIGHTS_NAME)`` returning ``(metadata, index)``.
    """
    a_ = convert_file_size_to_int(UpperCamelCase )
    a_ = []
    a_ = {}
    a_ = 0
    a_ = 0
    os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        a_ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        a_ = flatten_dict(UpperCamelCase , sep="""/""" )
    a_ = {}
    for layer in checkpoint_info.keys():
        a_ , a_ , a_ = get_key_and_tensorstore_dict(
            UpperCamelCase , UpperCamelCase , UpperCamelCase )
        if curr_real_layer_name in all_layers:
            a_ = content
        else:
            a_ = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        a_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        a_ = torch.tensor(UpperCamelCase )
        a_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        a_ , a_ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , UpperCamelCase )
        a_ = """/""".join(UpperCamelCase )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            a_ = os.path.join(
                UpperCamelCase , weights_name.replace(""".bin""" , F"""-{len(UpperCamelCase )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(UpperCamelCase , UpperCamelCase )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            a_ = {}
            a_ = 0
        a_ = raw_weights.to(getattr(UpperCamelCase , UpperCamelCase ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    a_ = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , F"""-{len(UpperCamelCase )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(UpperCamelCase , UpperCamelCase )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(UpperCamelCase ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    a_ = {}
    a_ = {}
    for idx, shard in enumerate(UpperCamelCase ):
        a_ = weights_name.replace(
            """.bin""" , F"""-{idx+1:05d}-of-{len(UpperCamelCase ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        a_ = os.path.join(UpperCamelCase , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
        a_ = shard
        for key in shard:
            a_ = shard_file
    # Add the metadata
    a_ = {"""total_size""": total_size}
    a_ = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(UpperCamelCase , UpperCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
        a_ = json.dumps(UpperCamelCase , indent=2 , sort_keys=UpperCamelCase ) + """\n"""
        f.write(UpperCamelCase )
    return metadata, index
if __name__ == "__main__":
    # NOTE(review): broken as written — the parser is bound to `_A` but the
    # `add_argument` calls read `parser`; `args` is likewise unbound;
    # `shard_on_the_fly` is not defined under that name in this file; and
    # `args.switch_tax_checkpoint_path` is a typo for the argparse attribute
    # `switch_t5x_checkpoint_path` declared below.
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    _A = parser.parse_args()
    shard_on_the_fly(
        args.switch_tax_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Ad-hoc sanity check: generate text with a converted switch-base-8 model.

    NOTE(review): non-functional — the body reads ``config``, ``model``,
    ``tokenizer`` and ``out`` which are never bound (assignments all target
    ``a_``), references an undefined ``UpperCamelCase`` (the function takes no
    parameters), and ``TaTokenizer`` looks like a mangled ``T5Tokenizer`` —
    confirm against the upstream script.  Contains hard-coded developer paths.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    a_ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    a_ = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    a_ = TaTokenizer.from_pretrained("""t5-small""" )
    a_ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    a_ = tokenizer(UpperCamelCase , return_tensors="""pt""" ).input_ids
    a_ = model.generate(UpperCamelCase , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( A ):
    """Composite processor pairing an auto image processor with an auto tokenizer.

    NOTE(review): mangled — the base name ``A`` is undefined here (upstream
    this derives from ``ProcessorMixin``, imported above); the three class
    attributes and five distinct methods below all reuse one name
    (``lowerCAmelCase_`` / ``lowerCamelCase_``), so only the last binding of
    each survives class creation; and the methods read attributes/locals that
    are never bound under those names (``kwargs``, ``image_processor``,
    ``tokenizer``, ``self._in_target_context_manager``,
    ``self.current_processor``, ``self.tokenajson``, ``tokens``,
    ``added_vocab``, ``output``…).  Kept as-is; comments only.
    """

    # NOTE(review): these three bindings all target the same name, so only the
    # last ('AutoTokenizer') is visible after class creation.
    lowerCAmelCase_ = ['image_processor', 'tokenizer']
    lowerCAmelCase_ = 'AutoImageProcessor'
    lowerCAmelCase_ = 'AutoTokenizer'

    def __init__( self : str,__A : List[str]=None,__A : Optional[Any]=None,**__A : str ):
        # NOTE(review): all three parameters are named ``__A`` — only the last
        # binding is visible, so ``kwargs`` / ``image_processor`` /
        # ``tokenizer`` read below are unbound.
        _lowerCamelCase : Optional[Any] = None
        if "feature_extractor" in kwargs:
            # Back-compat shim for the removed `feature_extractor` kwarg.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",__A,)
            _lowerCamelCase : Optional[int] = kwargs.pop("feature_extractor" )
        _lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(__A,__A )
        _lowerCamelCase : Any = self.image_processor
        _lowerCamelCase : str = False

    def __call__( self : List[Any],*__A : Tuple,**__A : Optional[int] ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*__A,**__A )
        _lowerCamelCase : List[str] = kwargs.pop("images",__A )
        _lowerCamelCase : Any = kwargs.pop("text",__A )
        if len(__A ) > 0:
            _lowerCamelCase : Dict = args[0]
            _lowerCamelCase : Tuple = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            _lowerCamelCase : str = self.image_processor(__A,*__A,**__A )
        if text is not None:
            _lowerCamelCase : Optional[int] = self.tokenizer(__A,**__A )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Merge token ids into the image-processor output.
            _lowerCamelCase : Union[str, Any] = encodings["input_ids"]
            return inputs

    def lowerCamelCase_ ( self : int,*__A : List[str],**__A : Any ):
        # Delegates to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__A,**__A )

    def lowerCamelCase_ ( self : Dict,*__A : List[Any],**__A : List[str] ):
        # Delegates to the tokenizer's decode.
        return self.tokenizer.decode(*__A,**__A )

    @contextmanager
    def lowerCamelCase_ ( self : Optional[Any] ):
        # Deprecated target-processor context manager: temporarily swap the
        # current processor to the tokenizer.
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        _lowerCamelCase : Tuple = True
        _lowerCamelCase : Optional[int] = self.tokenizer
        yield
        _lowerCamelCase : Any = self.image_processor
        _lowerCamelCase : List[str] = False

    def lowerCamelCase_ ( self : List[Any],__A : str,__A : int=False,__A : int=None ):
        # Donut-style token stream -> (nested) JSON conversion; recurses via
        # `self.tokenajson`, which is not defined under that name here.
        if added_vocab is None:
            _lowerCamelCase : List[Any] = self.tokenizer.get_added_vocab()
        _lowerCamelCase : str = {}
        while tokens:
            _lowerCamelCase : Tuple = re.search(r"<s_(.*?)>",__A,re.IGNORECASE )
            if start_token is None:
                break
            _lowerCamelCase : Any = start_token.group(1 )
            _lowerCamelCase : Optional[Any] = re.search(rf'</s_{key}>',__A,re.IGNORECASE )
            _lowerCamelCase : int = start_token.group()
            if end_token is None:
                # Unmatched opening tag: drop it and continue.
                _lowerCamelCase : Any = tokens.replace(__A,"" )
            else:
                _lowerCamelCase : Dict = end_token.group()
                _lowerCamelCase : Union[str, Any] = re.escape(__A )
                _lowerCamelCase : Union[str, Any] = re.escape(__A )
                _lowerCamelCase : Union[str, Any] = re.search(f'{start_token_escaped}(.*?){end_token_escaped}',__A,re.IGNORECASE )
                if content is not None:
                    _lowerCamelCase : str = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content: # non-leaf node
                        _lowerCamelCase : Union[str, Any] = self.tokenajson(__A,is_inner_value=__A,added_vocab=__A )
                        if value:
                            if len(__A ) == 1:
                                _lowerCamelCase : Optional[Any] = value[0]
                            _lowerCamelCase : Dict = value
                    else: # leaf nodes
                        _lowerCamelCase : Tuple = []
                        for leaf in content.split(r"<sep/>" ):
                            _lowerCamelCase : List[Any] = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                _lowerCamelCase : int = leaf[1:-2] # for categorical special tokens
                            output[key].append(__A )
                        if len(output[key] ) == 1:
                            _lowerCamelCase : Any = output[key][0]
                _lowerCamelCase : List[Any] = tokens[tokens.find(__A ) + len(__A ) :].strip()
                if tokens[:6] == r"<sep/>": # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:],is_inner_value=__A,added_vocab=__A )
        if len(__A ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def lowerCamelCase_ ( self : Any ):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",__A,)
        return self.image_processor_class

    @property
    def lowerCamelCase_ ( self : Dict ):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",__A,)
        return self.image_processor
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict" , [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
        SplitDict({"train": SplitInfo()} ),
    ] , )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: SplitDict ) -> Optional[int]:
    """Round-trip a SplitDict through its YAML list representation.

    NOTE(review): non-functional — the body reads ``split_dict`` and
    ``reloaded`` which are never bound (every assignment targets ``A__``),
    and the first assert compares the parameter's length with itself, so it
    is vacuously true.
    """
    A__ = split_dict._to_yaml_list()
    assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
    A__ = SplitDict._from_yaml_list(SCREAMING_SNAKE_CASE_ )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        A__ = None
        # the split name of split_dict takes over the name of the split info object
        A__ = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=SCREAMING_SNAKE_CASE_ ), SplitInfo(dataset_name="my_dataset" )] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> List[Any]:
    """Check that ``asdict`` keeps the deprecated ``dataset_name`` field.

    NOTE(review): the decorator references ``SCREAMING_SNAKE_CASE_`` at module
    scope, where it is undefined (upstream this argument is ``None``), so the
    module fails at import; the body also reads ``split_info`` /
    ``split_dict_asdict`` which are never bound (assignments target ``A__``),
    and this definition rebinds the test name used just above.
    """
    A__ = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 514 | 0 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def A__ ( A__ ) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape ``(n, 1)``.

    Fixes a NameError: the body referenced ``input_array`` while the only
    parameter is named ``A__`` (the obfuscated name is kept so the interface
    is unchanged).
    """
    return A__.reshape((A__.size, 1))
def A__ ( features , labels , classes ) -> np.ndarray:
    """Return the within-class covariance matrix of ``features``.

    Fixes a SyntaxError (the three parameters all shared the name ``A__``) by
    restoring the names the body actually reads, and inlines the column-vector
    reshape that the mangled code delegated to an unbound helper.

    Args:
        features: array of shape (n_features, n_samples), one sample per column.
        labels: 1-D array of length n_samples with integer class ids 0..classes-1.
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) covariance matrix, normalised by the total
        number of samples.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        # Centralize the data of class i (mean over samples, as a column).
        centered_data = data - data.mean(1).reshape((-1, 1))
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def A__ ( features , labels , classes ) -> np.ndarray:
    """Return the between-class covariance matrix of ``features``.

    Fixes a SyntaxError (the three parameters all shared the name ``A__``) by
    restoring the names the body actually reads, and inlines the column-vector
    reshape that the mangled code delegated to an unbound helper.

    Args:
        features: array of shape (n_features, n_samples), one sample per column.
        labels: 1-D array of length n_samples with integer class ids 0..classes-1.
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) matrix: the class-size-weighted scatter of the
        per-class means around the global mean, normalised by the sample count.
    """
    general_mean = features.mean(1).reshape((-1, 1))
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        mean_diff = data.mean(1).reshape((-1, 1)) - general_mean
        term = device_data * np.dot(mean_diff, mean_diff.T)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += term
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = term
    return covariance_sum / features.shape[1]
def A__ ( features , dimensions ) -> np.ndarray:
    """Project ``features`` onto its top ``dimensions`` principal components.

    Fixes a SyntaxError (both parameters shared the name ``A__``) by restoring
    the names the body actually reads, and restores the two-value unpack of
    ``np.linalg.eigh`` that the mangling collapsed into a single assignment.

    Args:
        features: array of shape (n_features, n_samples), one sample per column.
        dimensions: number of principal components to keep.

    Returns:
        (dimensions, n_samples) projection of the centred data.

    Raises:
        AssertionError: if ``features`` is empty.
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        # Empty dataset: log at ERROR and abort (force=True rebinds handlers).
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def A__ ( A__ , A__ , A__ , A__ ) -> np.ndarray:
    """Project ``features`` onto ``dimensions`` LDA axes via a generalized eigenproblem.

    NOTE(review): non-functional — all four parameters share the name ``A__``
    (a SyntaxError); the body reads ``classes``, ``dimensions``, ``features``,
    ``eigenvectors``, ``svd_matrix``, ``filtered_svd_matrix`` and
    ``projected_data`` which are never bound since every assignment targets
    ``_UpperCAmelCase``; ``if features.any`` is missing the call parentheses
    (a bound method is always truthy); and
    ``covariance_between_classes`` / ``covariance_within_classes`` are not
    defined under those names in this file.
    """
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _UpperCAmelCase = eigh(
            covariance_between_classes(A__ , A__ , A__ ) , covariance_within_classes(A__ , A__ , A__ ) , )
        _UpperCAmelCase = eigenvectors[:, ::-1][:, :dimensions]
        _UpperCAmelCase = np.linalg.svd(A__ )
        _UpperCAmelCase = svd_matrix[:, 0:dimensions]
        _UpperCAmelCase = np.dot(filtered_svd_matrix.T , A__ )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=A__ )
        logging.error("Dataset empty" )
        raise AssertionError
def A__ ( ) -> None:
    """Expect LDA to raise AssertionError when dimensions > classes.

    NOTE(review): non-functional — ``linear_discriminant_analysis`` is not
    defined under that name in this file, and ``pytest.raises(A__)`` is passed
    this very function instead of an exception class (a TypeError at runtime).
    """
    _UpperCAmelCase = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    _UpperCAmelCase = np.array([0, 0, 0, 1, 1] )
    _UpperCAmelCase = 2
    _UpperCAmelCase = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(A__ ) as error_info:
        _UpperCAmelCase = linear_discriminant_analysis(
            A__ , A__ , A__ , A__ )
        if isinstance(A__ , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def A__ ( ) -> None:
    """Expect PCA to raise AssertionError for a mismatching projection.

    NOTE(review): non-functional — ``principal_component_analysis`` is not
    defined under that name in this file, ``pytest.raises(A__)`` is passed this
    very function instead of an exception class, and the ``if not
    np.allclose(...)`` reads names (``output``, ``expected``) that are never
    bound because assignments target ``_UpperCAmelCase``.
    """
    _UpperCAmelCase = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    _UpperCAmelCase = 2
    _UpperCAmelCase = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(A__ ) as error_info:
        _UpperCAmelCase = principal_component_analysis(A__ , A__ )
        if not np.allclose(A__ , A__ ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
| 719 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# Module-level flag; unused within this chunk (presumably a debug/slow-test
# switch upstream) — TODO confirm against the original test module.
SCREAMING_SNAKE_CASE_ = False
class a ( unittest.TestCase ):
    """Intentionally empty placeholder suite.

    NOTE(review): the identically-named class declared just below rebinds the
    name ``a``, so this class is unreachable after import.
    """

    pass
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): mangled — all three methods share the name ``__A`` (only the
    last survives class creation), every result is assigned to
    ``_UpperCAmelCase`` while later lines read ``pipe`` / ``generator`` /
    ``image`` / ``new_image`` / ``image_slice`` / ``expected_slice``, and the
    methods reference an undefined ``snake_case_`` (upstream these were
    ``torch_device`` / ``tmpdirname`` / ``False`` / a prompt string).  Also,
    ``torch.floataa`` looks like a mangled ``torch.float16``.  Kept as-is;
    comments only.
    """

    def __A ( self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __A ( self ) -> int:
        # Round-trip save/load and compare two short generations.
        _UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        _UpperCAmelCase = "A painting of a squirrel eating a burger "
        _UpperCAmelCase = torch.manual_seed(0 )
        _UpperCAmelCase = pipe(
            prompt=snake_case_ , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(snake_case_ )
            _UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case_ )
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        _UpperCAmelCase = generator.manual_seed(0 )
        _UpperCAmelCase = pipe(
            prompt=snake_case_ , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __A ( self ) -> Optional[Any]:
        # Compare a 50-step fp16 generation against a reference slice.
        _UpperCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        _UpperCAmelCase = "A painting of a squirrel eating a burger "
        _UpperCAmelCase = torch.manual_seed(0 )
        _UpperCAmelCase = pipe(
            prompt=snake_case_ , generator=snake_case_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        _UpperCAmelCase = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        _UpperCAmelCase = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase_ : int = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """Deprecated shim that warns and forwards to the image-processor base class.

    NOTE(review): the base name ``UpperCamelCase_`` is undefined in this file —
    upstream this derives from ``LayoutLMvaImageProcessor`` (imported above).
    The ``__init__`` also reuses ``lowerCAmelCase`` for both ``*args`` and
    ``**kwargs`` (a SyntaxError), and passes it to ``warnings.warn`` where a
    warning category (e.g. ``FutureWarning``) is expected.
    """

    def __init__( self , *lowerCAmelCase , **lowerCAmelCase ) -> None:
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , lowerCAmelCase , )
        super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 64 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _A ( A ) -> bool:
    """Return True if Unicode codepoint ``A`` lies in a CJK ideograph block.

    Fixes a NameError: the range tests read an undefined name ``cp`` instead
    of the ``A`` parameter.  (The return annotation is corrected from ``int``
    to ``bool`` to match the literals returned.)
    """
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (A >= 0x4E00 and A <= 0x9FFF)
        or (A >= 0x3400 and A <= 0x4DBF)  #
        or (A >= 0x20000 and A <= 0x2A6DF)  #
        or (A >= 0x2A700 and A <= 0x2B73F)  #
        or (A >= 0x2B740 and A <= 0x2B81F)  #
        or (A >= 0x2B820 and A <= 0x2CEAF)  #
        or (A >= 0xF900 and A <= 0xFAFF)
        or (A >= 0x2F800 and A <= 0x2FA1F)  #
    ):  #
        return True
    return False
def _A ( A ) -> int:
    """Return 1 if every character of word ``A`` is a CJK ideograph, else 0.

    Fixes two defects: the loop converted the whole word (``ord(A)``) instead
    of each character — a TypeError for any multi-character word — and the
    result of the helper check was assigned to a throwaway name and ignored.

    NOTE(review): ``_is_chinese_char`` is not defined under that name in this
    chunk (the codepoint check above is itself named ``_A``) — confirm the
    intended helper binding before running.
    """
    # word like '180' or '身高' or '神'
    for char in A:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def _A ( A ) -> list:
    """Collect the multi-character, all-CJK tokens of ``A`` into a list.

    Fixes: the per-token test operated on the whole token sequence
    (``len(A) > 1 and is_chinese(A)``) instead of each token, and its result
    was assigned to a throwaway name — so the membership test never ran on the
    right value.  (The return annotation is corrected from ``Dict`` to
    ``list``, matching the returned value.)

    NOTE(review): ``is_chinese`` is not defined under that name in this chunk
    (every helper here is called ``_A``) — confirm the intended binding.
    """
    word_set = set()
    for token in A:
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)
def _A ( A ,A ) -> Tuple:
    """Prefix non-initial characters of whole Chinese words with "##".

    NOTE(review): non-functional — both parameters are named ``A`` (a
    SyntaxError) and the body reads ``chinese_word_set``, ``bert_tokens``,
    ``bert_word``, ``start``, ``end``, ``whole_word`` and ``single_word``
    which are never bound because every assignment targets the throwaway
    ``lowercase``.  Upstream this is
    ``add_sub_symbol(bert_tokens, chinese_word_set)``.
    """
    if not chinese_word_set:
        return bert_tokens
    lowercase : Optional[Any] = max([len(A ) for w in chinese_word_set] )
    lowercase : Any = bert_tokens
    lowercase , lowercase : int = 0, len(A )
    while start < end:
        lowercase : List[Any] = True
        if is_chinese(bert_word[start] ):
            # Try the longest candidate word first, shrinking down to length 2.
            lowercase : Optional[Any] = min(end - start ,A )
            for i in range(A ,1 ,-1 ):
                lowercase : Any = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        lowercase : Any = "##" + bert_word[j]
                    lowercase : List[Any] = start + i
                    lowercase : List[Any] = False
                    break
        if single_word:
            start += 1
    return bert_word
def _A ( A ,A ,A ) -> List[Any]:
    """Build whole-word-masking reference ids from LTP and BERT tokenizations.

    NOTE(review): non-functional — the three parameters share the name ``A``
    (a SyntaxError) and the body reads ``lines``, ``ltp_tokenizer``,
    ``bert_tokenizer``, ``res``, ``ltp_res``, ``bert_res``, ``input_tokens``,
    ``ref_id`` and ``ref_ids`` which are never bound because every assignment
    targets the throwaway ``lowercase``; the helper names ``get_chinese_word``,
    ``add_sub_symbol`` and ``_is_chinese_char`` are also not defined under
    those names here.  Upstream this is
    ``prepare_ref(lines, ltp_tokenizer, bert_tokenizer)``.
    """
    lowercase : List[Any] = []
    for i in range(0 ,len(A ) ,1_0_0 ):
        # Segment 100 lines at a time with LTP.
        lowercase : int = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
        lowercase : Dict = [get_chinese_word(A ) for r in res]
        ltp_res.extend(A )
    assert len(A ) == len(A )
    lowercase : List[str] = []
    for i in range(0 ,len(A ) ,1_0_0 ):
        lowercase : int = bert_tokenizer(lines[i : i + 1_0_0] ,add_special_tokens=A ,truncation=A ,max_length=5_1_2 )
        bert_res.extend(res["input_ids"] )
    assert len(A ) == len(A )
    lowercase : Optional[Any] = []
    for input_ids, chinese_word in zip(A ,A ):
        lowercase : str = []
        for id in input_ids:
            lowercase : Any = bert_tokenizer._convert_id_to_token(A )
            input_tokens.append(A )
        lowercase : Tuple = add_sub_symbol(A ,A )
        lowercase : Tuple = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(A ):
            if token[:2] == "##":
                lowercase : Any = token[2:]
                # save chinese tokens' pos
                if len(A ) == 1 and _is_chinese_char(ord(A ) ):
                    ref_id.append(A )
        ref_ids.append(A )
    assert len(A ) == len(A )
    return ref_ids
def _A ( A ) -> Union[str, Any]:
    """Read input lines, compute whole-word references, write them as JSON lines.

    NOTE(review): non-functional — the body reads ``args``, ``data``,
    ``ltp_tokenizer``, ``bert_tokenizer`` and ``ref_ids`` which are never
    bound because every assignment targets the throwaway ``lowercase``, and
    ``prepare_ref`` is not defined under that name in this file.  Upstream
    this is ``main(args)``.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
        lowercase : List[str] = f.readlines()
    lowercase : Tuple = [line.strip() for line in data if len(A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    lowercase : Any = LTP(args.ltp ) # faster in GPU device
    lowercase : int = BertTokenizer.from_pretrained(args.bert )
    lowercase : List[str] = prepare_ref(A ,A ,A )
    with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
        lowercase : Optional[int] = [json.dumps(A ) + "\n" for ref in ref_ids]
        f.writelines(A )
if __name__ == "__main__":
    # NOTE(review): broken as written — the parser is bound to `lowerCAmelCase`
    # but the `add_argument` calls read `parser`; `args` is likewise unbound;
    # and `main` is not defined under that name in this file.
    lowerCAmelCase : List[Any] = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    lowerCAmelCase : Union[str, Any] = parser.parse_args()
    main(args)
| 372 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _UpperCAmelCase ( lowercase ):
    # Output container for the UNet's predicted sample array.
    # NOTE(review): the base name ``lowercase`` is never defined or imported in
    # this file (upstream this derives from ``BaseOutput``), and the large
    # class below reuses the name ``_UpperCAmelCase``, rebinding it on import.
    lowerCamelCase_ : jnp.ndarray
@flax_register_to_config
class _UpperCAmelCase ( nn.Module , lowercase , lowercase ):
    """Flax conditional 2-D UNet (an obfuscated FlaxUNet2DConditionModel copy).

    NOTE(review): non-functional as written — the base list names the
    undefined ``lowercase`` twice (Python rejects duplicate bases; upstream
    these are ``FlaxModelMixin`` and ``ConfigMixin``); every config field
    below is declared under the single name ``lowerCamelCase_`` (only the last
    binding survives); the two ``_snake_case`` methods (init-weights and
    setup) shadow each other; and method bodies assign to
    ``SCREAMING_SNAKE_CASE_`` while reading the original local names
    (``sample_shape``, ``timesteps``, ``sample``, ``down_blocks`` …).
    ``jnp.floataa`` / ``jnp.intaa`` look like mangled ``float32`` / ``int32``.
    Kept as-is; comments only.
    """

    # Config fields (sample_size, in/out channels, block types, …) — all bound
    # to one name by the obfuscation; see NOTE above.
    lowerCamelCase_ : int = 3_2
    lowerCamelCase_ : int = 4
    lowerCamelCase_ : int = 4
    lowerCamelCase_ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    lowerCamelCase_ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    lowerCamelCase_ : Union[bool, Tuple[bool]] = False
    lowerCamelCase_ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    lowerCamelCase_ : int = 2
    lowerCamelCase_ : Union[int, Tuple[int]] = 8
    lowerCamelCase_ : Optional[Union[int, Tuple[int]]] = None
    lowerCamelCase_ : int = 1_2_8_0
    lowerCamelCase_ : float = 0.0
    lowerCamelCase_ : bool = False
    lowerCamelCase_ : jnp.dtype = jnp.floataa
    lowerCamelCase_ : bool = True
    lowerCamelCase_ : int = 0
    lowerCamelCase_ : bool = False

    def _snake_case ( self : Optional[int] , UpperCAmelCase : jax.random.KeyArray):
        """Initialize the module parameters from a PRNG key (shadowed below)."""
        # init input tensors
        SCREAMING_SNAKE_CASE_ :Any = (1, self.in_channels, self.sample_size, self.sample_size)
        SCREAMING_SNAKE_CASE_ :Optional[int] = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa)
        SCREAMING_SNAKE_CASE_ :List[str] = jnp.ones((1,) , dtype=jnp.intaa)
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
        SCREAMING_SNAKE_CASE_ :List[Any] = jax.random.split(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :Dict = {"params": params_rng, "dropout": dropout_rng}
        return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)["params"]

    def _snake_case ( self : Any):
        """Build the conv-in, time embedding, down/mid/up blocks and conv-out."""
        SCREAMING_SNAKE_CASE_ :Any = self.block_out_channels
        SCREAMING_SNAKE_CASE_ :Any = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        SCREAMING_SNAKE_CASE_ :Dict = self.num_attention_heads or self.attention_head_dim
        # input
        SCREAMING_SNAKE_CASE_ :List[str] = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        SCREAMING_SNAKE_CASE_ :Optional[Any] = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
        SCREAMING_SNAKE_CASE_ :Dict = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype)
        SCREAMING_SNAKE_CASE_ :Tuple = self.only_cross_attention
        if isinstance(UpperCAmelCase , UpperCAmelCase):
            SCREAMING_SNAKE_CASE_ :Optional[int] = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(UpperCAmelCase , UpperCAmelCase):
            SCREAMING_SNAKE_CASE_ :Dict = (num_attention_heads,) * len(self.down_block_types)
        # down
        SCREAMING_SNAKE_CASE_ :int = []
        SCREAMING_SNAKE_CASE_ :List[str] = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            SCREAMING_SNAKE_CASE_ :int = output_channel
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = block_out_channels[i]
            SCREAMING_SNAKE_CASE_ :List[Any] = i == len(UpperCAmelCase) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                SCREAMING_SNAKE_CASE_ :Optional[int] = FlaxCrossAttnDownBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                SCREAMING_SNAKE_CASE_ :Any = FlaxDownBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = down_blocks
        # mid
        SCREAMING_SNAKE_CASE_ :List[str] = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        SCREAMING_SNAKE_CASE_ :str = []
        SCREAMING_SNAKE_CASE_ :Optional[Any] = list(reversed(UpperCAmelCase))
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = list(reversed(UpperCAmelCase))
        SCREAMING_SNAKE_CASE_ :Optional[Any] = list(reversed(UpperCAmelCase))
        SCREAMING_SNAKE_CASE_ :Optional[int] = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = output_channel
            SCREAMING_SNAKE_CASE_ :Optional[Any] = reversed_block_out_channels[i]
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase) - 1)]
            SCREAMING_SNAKE_CASE_ :List[str] = i == len(UpperCAmelCase) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                SCREAMING_SNAKE_CASE_ :List[Any] = FlaxCrossAttnUpBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                SCREAMING_SNAKE_CASE_ :Tuple = FlaxUpBlockaD(
                    in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(UpperCAmelCase)
            SCREAMING_SNAKE_CASE_ :List[Any] = output_channel
        SCREAMING_SNAKE_CASE_ :int = up_blocks
        # out
        SCREAMING_SNAKE_CASE_ :Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
        SCREAMING_SNAKE_CASE_ :Dict = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , ):
        """Run the UNet forward pass (sample, timesteps, encoder hidden states)."""
        # 1. time
        if not isinstance(UpperCAmelCase , jnp.ndarray):
            SCREAMING_SNAKE_CASE_ :Optional[Any] = jnp.array([timesteps] , dtype=jnp.intaa)
        elif isinstance(UpperCAmelCase , jnp.ndarray) and len(timesteps.shape) == 0:
            SCREAMING_SNAKE_CASE_ :Tuple = timesteps.astype(dtype=jnp.floataa)
            SCREAMING_SNAKE_CASE_ :Union[str, Any] = jnp.expand_dims(UpperCAmelCase , 0)
        SCREAMING_SNAKE_CASE_ :List[Any] = self.time_proj(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :List[Any] = self.time_embedding(UpperCAmelCase)
        # 2. pre-process
        SCREAMING_SNAKE_CASE_ :int = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1))
        SCREAMING_SNAKE_CASE_ :int = self.conv_in(UpperCAmelCase)
        # 3. down
        SCREAMING_SNAKE_CASE_ :List[str] = (sample,)
        for down_block in self.down_blocks:
            if isinstance(UpperCAmelCase , UpperCAmelCase):
                SCREAMING_SNAKE_CASE_ :Union[str, Any] = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train)
            else:
                SCREAMING_SNAKE_CASE_ :Union[str, Any] = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            # ControlNet-style residuals added to each skip connection.
            SCREAMING_SNAKE_CASE_ :Dict = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                UpperCAmelCase , UpperCAmelCase):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            SCREAMING_SNAKE_CASE_ :Dict = new_down_block_res_samples
        # 4. mid
        SCREAMING_SNAKE_CASE_ :List[str] = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            SCREAMING_SNAKE_CASE_ :Dict = down_block_res_samples[-(self.layers_per_block + 1) :]
            SCREAMING_SNAKE_CASE_ :Any = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(UpperCAmelCase , UpperCAmelCase):
                SCREAMING_SNAKE_CASE_ :str = up_block(
                    UpperCAmelCase , temb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train , )
            else:
                SCREAMING_SNAKE_CASE_ :int = up_block(UpperCAmelCase , temb=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train)
        # 6. post-process
        SCREAMING_SNAKE_CASE_ :List[str] = self.conv_norm_out(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.silu(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :int = self.conv_out(UpperCAmelCase)
        SCREAMING_SNAKE_CASE_ :Optional[int] = jnp.transpose(UpperCAmelCase , (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=UpperCAmelCase)
| 708 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf ( model , ckpt_dir , model_name ):
    '''Export a PyTorch `BertModel` state dict as a TensorFlow 1.x checkpoint.

    Args:
        model: loaded PyTorch `BertModel` whose weights are exported.
        ckpt_dir: output directory (created if missing) for the `.ckpt` files.
        model_name: checkpoint base name; dashes are replaced by underscores.

    NOTE(review): the mangled source declared `def lowercase(a, a, a)` — a
    duplicate-argument SyntaxError — and this definition was shadowed by a
    second `lowercase` below. Name and parameters restored from the keyword
    call site (`model=`, `ckpt_dir=`, `model_name=`) and the body's own
    unbound reads (`model.state_dict()`, `model_name.replace`).
    '''
    # These PyTorch weights are stored transposed relative to TF kernels.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # PyTorch -> TF variable-name substitutions, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()

    def to_tf_var_name(name ):
        # Translate a PyTorch parameter name into its TF checkpoint name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"

    def create_tf_var(tensor , name , session ):
        # Create and zero-initialize a TF variable matching tensor shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main ( raw_args=None ):
    '''Parse CLI arguments, load the PyTorch BERT model and export it to TF.

    Args:
        raw_args: optional argument list; defaults to `sys.argv` via argparse.

    NOTE(review): restored from the mangled `lowercase` (the `if __name__`
    guard below calls `main()`); locals re-bound to the names (`args`) the
    body already read, and the converter now receives the loaded `model`
    (the mangled code passed the raw-args object as `model=`).
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    # Script entry point for the PyTorch -> TF BERT checkpoint conversion.
    # NOTE(review): `main` is not bound under this name in the mangled module
    # (the entry function was renamed to `lowercase`) — confirm after restore.
    main()
| 140 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    # PIL is only required when PIL.Image inputs are handled; import lazily.
    import PIL

# Module-level logger (mangled name; nothing in view reads it).
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def squared_euclidean_distance ( a , b ):
    '''Pairwise squared Euclidean distances between the rows of two matrices.

    Uses the expansion ||p - q||^2 = ||p||^2 - 2 p.q + ||q||^2 so the whole
    distance matrix comes out of a single matmul.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        Array of shape (n, m) where entry (i, j) is ||a[i] - b[j]||^2.

    NOTE(review): the mangled source declared duplicate parameters (a
    SyntaxError) under the name `snake_case__`; the rest of the module
    refers to this helper as `squared_euclidean_distance`, and the body
    already read `b` — names restored accordingly.
    '''
    bt = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    bb = np.sum(np.square(bt ) , axis=0 )
    ab = np.matmul(a , bt )
    d = aa[:, None] - 2 * ab + bb[None, :]
    return d
def color_quantize ( x , clusters ):
    '''Map every pixel of `x` to the index of its nearest color cluster.

    Args:
        x: image array whose trailing dimension is 3 (RGB); any leading shape.
        clusters: array of shape (k, 3) holding the palette colors.

    Returns:
        1-D array of cluster indices, one per (flattened) pixel.

    NOTE(review): the mangled source declared duplicate parameters (a
    SyntaxError) under the name `snake_case__`; the image processor below
    calls `color_quantize(...)` and the body already read `x` — names
    restored accordingly.
    '''
    pixels = x.reshape(-1 , 3 )
    distances = squared_euclidean_distance(pixels , clusters )
    return np.argmin(distances , axis=1 )
class lowerCamelCase ( BaseImageProcessor ):
    '''ImageGPT-style image processor: optionally resizes, rescales pixel
    values to [-1, 1] and color-quantizes images into palette indices.

    NOTE(review): restored from the mangled source: the base class
    (`BaseImageProcessor`, imported above, was the undefined name
    `lowercase__`), the `model_input_names` class attribute, distinct
    parameter names (the originals were duplicated — a SyntaxError), and the
    method names `resize`/`normalize`/`preprocess` (the body calls
    `self.resize` and `self.normalize`, but all three methods were defined
    under one name, shadowing each other).
    '''

    # Name of the pixel input expected by downstream models.
    model_input_names = ['pixel_values']

    def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ):
        '''
        Args:
            clusters: optional (k, 3) color palette used for quantization.
            do_resize: whether to resize inputs to `size` by default.
            size: target size dict; defaults to 256x256.
            resample: PIL resampling filter used when resizing.
            do_normalize: whether to rescale pixels to [-1, 1] by default.
            do_color_quantize: whether to map pixels to palette indices.
        '''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        '''Resize `image` to `size` (a dict with "height" and "width" keys).'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
        # Calls the module-level `resize` from image_transforms (the method
        # name does not shadow it inside the body).
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )

    def normalize( self , image , data_format = None , ):
        '''Rescale pixel values from [0, 255] to [-1, 1].'''
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''Run the configured pipeline over one image or a batch of images.

        Returns:
            `BatchFeature` whose "input_ids" entry holds either the processed
            pixel arrays or, when quantizing, flattened palette indices.
        '''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # BUGFIX: was an unconditional `np.array(clusters)`, which turns a
        # missing palette into `array(None)` and defeats the None check below.
        clusters = np.array(clusters ) if clusters is not None else None
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # BUGFIX: parenthesized — `a and b or c` binds as `(a and b) or c`,
        # which raised even when `do_resize` was False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 579 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
SCREAMING_SNAKE_CASE = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input ( ):
    '''Ask which compute environment is used and run the matching questionnaire.

    Returns:
        The config object produced by the SageMaker or cluster questionnaire.

    NOTE(review): name restored (`config_command` below calls
    `get_user_input()`); locals re-bound to the names (`compute_environment`,
    `config`) the mangled body already read without binding.
    '''
    compute_environment = _ask_options(
        "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser ( subparsers=None ):
    '''Create the argument parser for the `accelerate config` command.

    Args:
        subparsers: optional argparse subparsers collection; when given, the
            parser is registered as the `config` subcommand.

    Returns:
        The configured `argparse.ArgumentParser`.

    NOTE(review): name and the `parser` local restored (the mangled body
    assigned to a throwaway name and then read `parser`); the description now
    uses the module-level help constant instead of the `subparsers` argument.
    '''
    if subparsers is not None:
        parser = subparsers.add_parser("config" , description=SCREAMING_SNAKE_CASE )
    else:
        parser = argparse.ArgumentParser("Accelerate config command" , description=SCREAMING_SNAKE_CASE )
    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        # NOTE(review): upstream registers the command handler here
        # (`func=config_command`); the mangled source stored the `subparsers`
        # argument instead — preserved to avoid an undefined-name crash in
        # this module until the handler is restored under that name.
        parser.set_defaults(func=subparsers )
    return parser
def config_command ( args ):
    '''Run the interactive questionnaire and write the resulting config file.

    Args:
        args: parsed CLI namespace; `args.config_file` may override the
            default save location.

    NOTE(review): name and the `args` parameter restored (the body already
    read `args.config_file`). The directory check now targets `cache_dir`
    (imported above) — the mangled source tested `os.path.isdir(args)`,
    which can never name the cache directory.
    '''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Make sure the accelerate cache directory exists before using the
        # default config location inside it.
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    # Save as JSON or YAML depending on the chosen extension.
    if config_file.endswith(".json" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'''accelerate configuration saved at {config_file}''' )
def main ( ):
    '''Entry point for `accelerate config`: parse arguments, run the command.

    NOTE(review): name restored — the `if __name__` guard below calls
    `main()`; locals re-bound to the names (`parser`, `args`) the mangled
    body already read without binding.
    '''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )


if __name__ == "__main__":
    main()
| 579 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCAmelCase :
    # NOTE(review): hyper-parameter holder driving the TF EfficientFormer
    # tests. The mangled `__init__` signature repeats `__magic_name__` (a
    # duplicate-argument SyntaxError) and the bodies read names the
    # signatures never bind — mangling artifacts. The parameter/default
    # mapping cannot be reconstructed unambiguously from this view (two
    # adjacent `int = 3` slots), so the code is left token-identical and
    # only documentation is added. TODO: restore names against upstream.
    def __init__( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : int = 1_3 , __magic_name__ : int = 6_4 , __magic_name__ : int = 2 , __magic_name__ : int = 3 , __magic_name__ : int = 3 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : int = 1_2_8 , __magic_name__ : int=[1_6, 3_2, 6_4, 1_2_8] , __magic_name__ : int = 7 , __magic_name__ : int = 4 , __magic_name__ : int = 3_7 , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 1_0 , __magic_name__ : float = 0.02 , __magic_name__ : int = 2 , __magic_name__ : int = 1 , __magic_name__ : int = 1_2_8 , __magic_name__ : List[int] = [2, 2, 2, 2] , __magic_name__ : int = 2 , __magic_name__ : int = 2 , ):
        """Record every model/test hyper-parameter on the instance."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = num_channels
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = encoder_stride
        UpperCamelCase = num_attention_outputs
        UpperCamelCase = embed_dim
        # seq_length is the patch sequence plus the distillation/class slot.
        UpperCamelCase = embed_dim + 1
        UpperCamelCase = resolution
        UpperCamelCase = depths
        UpperCamelCase = hidden_sizes
        UpperCamelCase = dim
        UpperCamelCase = mlp_expansion_ratio

    def lowerCamelCase_ ( self : List[str] ):
        """Build random pixel values (and labels when used) plus a config."""
        UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase = None
        if self.use_labels:
            UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self : Optional[int] ):
        """Assemble an EfficientFormerConfig from the stored values."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def lowerCamelCase_ ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : List[str] ):
        """Run the base model and check the last-hidden-state shape."""
        UpperCamelCase = TFEfficientFormerModel(config=__magic_name__ )
        UpperCamelCase = model(__magic_name__ , training=__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase_ ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str ):
        """Run the classification head (incl. a greyscale variant) and check logits."""
        UpperCamelCase = self.type_sequence_label_size
        UpperCamelCase = TFEfficientFormerForImageClassification(__magic_name__ )
        UpperCamelCase = model(__magic_name__ , labels=__magic_name__ , training=__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase = 1
        UpperCamelCase = TFEfficientFormerForImageClassification(__magic_name__ )
        UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase = model(__magic_name__ , labels=__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCamelCase_ ( self : str ):
        """Return (config, inputs_dict) for the common-mixin tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''Common-API test suite for the TensorFlow EfficientFormer models.

    NOTE(review): restored from the mangled source: the mixin base classes
    (the undefined name `__snake_case`; `TFModelTesterMixin` and
    `PipelineTesterMixin` are imported above), the class attributes
    (`self.all_model_classes` is read throughout; the flag names follow the
    mixin conventions), the unittest method names (required for discovery),
    `_prepare_for_class` (called via `self` and `super()`), distinct
    parameter names for the two duplicated signatures (a SyntaxError), the
    local bindings the bodies read without binding, and the
    `asseretIsInstance` typo (an AttributeError at runtime).
    '''

    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        """Instantiate the shared model and config testers."""
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config(self):
        """Run the generic config round-trip checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        """The first positional argument of `call` must be `pixel_values`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_hidden_states_output(self):
        """Hidden states must have the expected count and trailing shape."""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                # BUGFIX: was `self.asseretIsInstance` (AttributeError).
                self.assertIsInstance(hidden_states , (list, tuple) )
                self.assertEqual(len(hidden_states ) , expected_num_layers )
                seq_len = getattr(self.model_tester , """seq_length""" , None )
                decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_len )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """Drop labels for the teacher model, which does not accept them."""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        """Base-model forward pass and shape check."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self):
        """Classification-head forward pass and shape check."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        """Loading the published checkpoint must succeed."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_attention_outputs(self):
        """Attention tensors must have the expected count and shape."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
        chunk_length = getattr(self.model_tester , """chunk_length""" , None )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def test_compile_tf_model(self):
        """Models must build under fully flexible (None) input dimensions."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs )
            self.assertTrue(outputs_dict is not None )
def prepare_img ( ):
    '''Load the COCO cats test fixture used by the integration tests below.

    NOTE(review): name restored — the integration tests call `prepare_img()`,
    while the mangled source defined this helper as `_lowercase` and returned
    the unbound name `image`.
    '''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration tests against the pretrained EfficientFormer-L1
    checkpoint.

    NOTE(review): the property/method names and the local bindings are
    restored from the mangled source — the bodies read `model`,
    `image_processor`, `inputs`, ... and `self.default_image_processor`,
    none of which the mangled assignments/definitions ever bound.
    '''

    @cached_property
    def default_image_processor(self):
        """Image processor for the reference checkpoint (None without vision extras)."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """End-to-end logits check for the plain classification head."""
        model = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        """Same end-to-end check for the distillation (teacher) head."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 181 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
# Module-level logger for the quantization-aware QA trainer.
# NOTE(review): renamed from the mangled `__snake_case` — the Trainer
# subclass below logs via the module-level name `logger`.
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
    # torch_xla is only available on TPU hosts; imported lazily so CPU/GPU
    # runs do not require it.
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class UpperCAmelCase ( Trainer ):
    '''Question-answering `Trainer` with pytorch-quantization calibration,
    quantized evaluation/prediction, and ONNX export support.

    NOTE(review): restored from the mangled source: the base class (the
    undefined name `__snake_case`; `Trainer` is imported above), the method
    names (`self.get_calib_dataloader(...)` is called in the body;
    `evaluate`/`predict` must override the base `Trainer` API), distinct
    parameter names (the originals were duplicated — a SyntaxError), and the
    local bindings the bodies read without binding. Boolean keyword values
    (`shuffle=True`, `calib=True`, `export_params=True`, ...) are restored
    from the upstream quantization-qdqbert example — confirm against it.
    '''

    def __init__(self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        """Store the QA post-processing hooks and quantization arguments."""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_2_8  # default number of calibration samples

    def get_calib_dataloader(self , calib_dataset=None ):
        """Return a shuffled DataLoader over the calibration dataset."""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description="""Calibration""" )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )

    def calibrate(self , calib_dataset=None ):
        """Feed `calib_num` samples through the model to collect quant ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info("""***** Running calibration *****""" )
        logger.info(F' Num examples = {self.calib_num}' )
        logger.info(F' Batch size = {calib_dataloader.batch_size}' )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model

    def evaluate(self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix: str = "eval" ):
        """Evaluate with QA post-processing; returns prefixed metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            # Always restore the metric callback, even if the loop raised.
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'{metric_key_prefix}_' ):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics

    def predict(self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" ):
        """Predict with QA post-processing; returns a `PredictionOutput`."""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'{metric_key_prefix}_' ):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )

    def save_onnx(self , output_dir="./" ):
        """Export the fake-quantized model to ONNX with dynamic batch/seq axes."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        batch = next(iter(eval_dataloader ) )
        # saving device - to make it consistent
        device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )
        logger.info("""Converting model to be onnx compatible""" )
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device )
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model , """module""" ) else model
        quant_trainer.configure_model(model_to_save , self.quant_trainer_args )
        output_model_file = os.path.join(output_dir , """model.onnx""" )
        logger.info(F'exporting model to {output_model_file}' )
        axes = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            model_to_save , input_tuple , output_model_file , export_params=True , opset_version=1_3 , do_constant_folding=True , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            } , verbose=True , )
        logger.info("""onnx export finished""" )
| 181 | 1 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# Module logger; the functions below call `logger.info(...)`, so it must be bound
# to this name (the obfuscated binding `__A = ...` left `logger` undefined).
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the teacher layers at indices `layers_to_copy` from `src_layers` into `dest_layers`.

    `dest_layers` is modified in place via `load_state_dict`, so it must contain
    exactly `len(layers_to_copy)` modules with matching parameter shapes.
    (Fix: the original signature collapsed all three parameter names to
    `UpperCamelCase`, leaving `src_layers`/`dest_layers`/`layers_to_copy` undefined.)
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# (Fix: both tables were bound to `__A`, so the second clobbered the first and the
# names `LAYERS_TO_COPY` / `LAYERS_TO_SUPERVISE` used below were undefined.)
LAYERS_TO_COPY = {
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Return the teacher layer indices to copy for a student with `n_student` layers.

    Falls back to the first `n_student` teacher layers (with a warning) when no
    hardcoded mapping exists. (Fix: both parameters were collapsed to
    `UpperCamelCase`, so `n_teacher`/`n_student` in the body were undefined; the
    name also now matches the call sites in `create_student_by_copying_alternating_layers`.)
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    """Return which teacher layers supervise each student layer (for --supervise_forward).

    Raises ValueError when the student is deeper than the teacher.
    (Fix: both parameters were collapsed to `UpperCamelCase`, leaving
    `n_student`/`n_teacher` in the body undefined.)
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        # a single student layer is supervised by the teacher's last layer
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Build a student seq2seq model with `e` encoder and `d` decoder layers copied from `teacher`.

    Saves the student (and the teacher's tokenizer, when `teacher` is a hub id)
    to `save_path` and returns `(student, e_layers_to_copy, d_layers_to_copy)`.
    (Fix: the obfuscated version bound every value to throwaway names while the
    body read `teacher`, `e`, `d`, `init_kwargs`, `save_path`, ... — all undefined;
    the function name now matches the `fire.Fire` entry point below.)
    NOTE(review): `AutoModelForSeqaSeqLM` mirrors this file's (mangled) import of
    AutoModelForSeq2SeqLM — confirm the import line upstream.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5-style configs use num_layers / num_decoder_layers
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # CLI entry point: fire exposes the distillation function's kwargs as flags.
    # NOTE(review): `create_student_by_copying_alternating_layers` is not defined
    # under that name in this file as written (the def above is named `lowercase`)
    # — confirm the function name matches.
    fire.Fire(create_student_by_copying_alternating_layers)
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece) tokenizer is optional; without it, saving a slow
# vocabulary is impossible and `slow_tokenizer_class` resolves to None.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# (Fix: every constant below was bound to `__A`, shadowing the previous one,
# while the tokenizer class references these conventional names.)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) each pretrained checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

# Sentencepiece word-boundary marker.
SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase ( PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace *tokenizers*.

    (Fix: the base class was the undefined name `_UpperCamelCase` — the imported
    `PreTrainedTokenizerFast` is the intended base — and all class attributes and
    methods shared one obfuscated name, so each shadowed the previous.)
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain strings in AddedToken so stripping behaviour is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Return token type ids: 0 for the first segment (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model file into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 656 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase_ : int = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    """Build the argument parser for `accelerate tpu-config`.

    When `subparsers` is given, registers as the `tpu-config` subcommand and
    wires `func` to the launcher; otherwise returns a standalone parser.
    (Fix: `set_defaults(func=...)` was passed the `subparsers` argument instead
    of the launcher callable.)
    """
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    """Run the configured startup commands on every worker of a TPU pod via `gcloud ... ssh`.

    Merges CLI arguments with the saved accelerate config, assembles the command
    string, and either prints it (`--debug`) or executes it with subprocess.
    (Fix: the obfuscated version dropped every assignment target and passed the
    undefined `snake_case_` where `default_config_file` / `Version` were meant.)
    """
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    """Standalone entry point: parse CLI args and launch the TPU setup commands.

    (Fix: the launcher was called with the undefined name `snake_case_` instead
    of the parsed args.)
    """
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome in O(n) time, O(1) space.

    Splits the list at the midpoint, reverses the second half in place, then
    compares the halves. The input list is mutated (second half left reversed).
    (Fix: the obfuscated version collapsed `fast`/`slow`/`second`/`node`/`nxt`
    into single throwaway names, leaving most of the body referencing undefined
    variables.)
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    """Check whether a singly linked list is a palindrome using a stack (O(n) space).

    Pushes the second half's values onto a stack and pops them against a forward
    walk from the head. (Fix: the obfuscated version lost the
    `slow = fast = cur = head` initialization and the loop variable names.)
    """
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    """Check whether a singly linked list is a palindrome using a value→positions map.

    A list is a palindrome iff, for every value, its occurrence positions mirror
    around the center (each pair sums to `last_index`), with at most one value
    appearing an odd number of times. (Fix: the obfuscated version lost the
    `d`/`pos`/`checksum`/`middle`/`step` bindings and the `else` that guards the
    pairing loop.)
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
| 107 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal KwargsHandler with one field per supported type, exercised by the
    tests below via `MockClass(...)`. (Fix: the base was the undefined `_A` —
    the imported `KwargsHandler` is the intended base — and all three fields
    shared one obfuscated name, so only the last survived.)"""

    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    """Tests for accelerate kwargs handlers (to_kwargs, GradScaler and DDP kwargs)."""

    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Verify GradScalerKwargs values are forwarded to the underlying scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun; the __main__ block below does the checks.
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Executed under torchrun by test_ddp_kwargs: check DistributedDataParallelKwargs
    # values actually reach the DDP wrapper. (Fix: the obfuscated version dropped
    # the `ddp_scaler`/`model`/`error_msg`/`observed_bucket_cap_map` bindings.)
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 394 |
import qiskit
def half_adder(bita: int, bitb: int):
    """Simulate a quantum half adder for two classical input bits.

    Returns the measurement histogram: classical bit 0 is the XOR (sum) and
    classical bit 1 the AND (carry) of the inputs. (Fix: both parameters were
    collapsed to `UpperCamelCase_`, so the `bita`/`bitb` reads and the
    `execute(qc_ha, simulator, ...)` arguments were undefined; the name now
    matches the `half_adder(1, 1)` call in the main guard.)
    """
    simulator = qiskit.Aer.get_backend('aer_simulator')

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    # Demo: 1 + 1 should yield sum=0, carry=1 (counts dominated by '10').
    # (Fix: the result was bound to an obfuscated name while the f-string read `counts`.)
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
| 471 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function(*args)` from a notebook on TPU, one/multi GPU, MPS or CPU.

    Picks the launch strategy from the environment (Kaggle/Colab TPU, Colab GPU,
    local multi-GPU via forked subprocesses, or in-process single device).
    (Fix: the obfuscated version dropped the `in_colab`/`in_kaggle`/`launcher`/
    `num_processes` bindings; the mixed-precision error message also read
    `args.mixed_precision` although `args` is the argument tuple — it now reports
    the actual `mixed_precision` value.)
    """
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"""Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."""
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"""Launching a training on {num_processes} TPU cores.""")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"""Launching training on {num_processes} GPUs.""")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch `function(*args)` on `num_processes` CPU workers for debugging.

    Forces CPU-only distributed mode via environment variables and uses a
    temporary rendezvous file. (Fix: the obfuscated version dropped the
    `launcher` binding and passed the function where `debug=True` was meant.)
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 523 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds a small BigBird config plus random inputs for the Flax model tests.

    (Fix: the class name was obfuscated although the test suite below
    instantiates `FlaxBigBirdModelTester`, every `self.` assignment target was
    dropped, and the two helper methods shared one name.)
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random inputs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common Flax test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(snake_case, unittest.TestCase):
    """Flax BigBird model tests.

    Fixes: every method was named `_lowerCamelCase` and every class attribute
    `UpperCamelCase`, so later definitions silently overwrote earlier ones and
    the attributes read elsewhere (`self.all_model_classes`,
    `self.test_attn_probs`) never existed; `check_pt_flax_outputs` also had
    duplicate parameter names, which is a SyntaxError.
    NOTE(review): the base `snake_case` is an obfuscated mixin name
    (presumably the common Flax tester mixin) — confirm against the original.
    """

    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    # Block-sparse attention returns no attention probs, so that test is skipped.
    test_attn_probs = False
    # NOTE(review): second bare False attribute; name assumed from upstream tests.
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                # Same number of outputs with identical shapes, jitted or not.
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1E-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions'''):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 523 | 1 |
import sys
from collections import defaultdict
class Heap:
    """Indexed min-heap used by Prim's algorithm.

    ``heap`` holds distances, ``positions[i]`` is the vertex stored at heap
    slot ``i``, and ``self.node_position[v]`` is the heap slot currently
    holding vertex ``v``; the map is kept in sync on every swap.

    Fix: the obfuscated version never assigned ``self.node_position`` or any
    local, and all five methods shared one name — restored from the call
    sites in ``prisms_algorithm``.
    """

    def __init__(self):
        # node_position[v] == index of vertex v inside heap/positions
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap slot currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now sits at heap slot ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:  # leaf: nothing below to compare with
            return
        if 2 * start + 2 >= size:  # only a left child exists
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap distances together with the vertices that own them.
            heap[start], heap[smallest_child] = heap[smallest_child], heap[start]
            positions[start], positions[smallest_child] = (
                positions[smallest_child],
                positions[start],
            )
            # Keep the vertex -> slot map consistent with the swap.
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` (owned by vertex ``position[index]``) up toward the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Shift the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at slot 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over the distance/vertex arrays."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest distance."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel sinks to the bottom during the sift
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the MST edges of a connected weighted graph via Prim's algorithm.

    ``adjacency_list`` maps each vertex (0..n-1) to a list of
    ``[neighbor, weight]`` pairs; returns ``(parent, vertex)`` tree edges.
    Function name restored from the ``__main__`` call site; locals restored
    from their read sites (``visited``, ``nbr_tv``, ``distance_tv``, ...).
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Names restored: `edges_number`, `adjacency_list` and `edge` were read
    # below but never assigned in the obfuscated version.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        # Each line: "u v weight" — add the edge in both directions (undirected).
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 10 |
"""simple docstring"""
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG.

    ``graph`` maps each vertex (0..n-1) to its list of successors; distances
    are propagated in topological order (Kahn's algorithm). Function name
    restored from the module-level call site; ``indegree``/``queue``/
    ``long_dist`` were read but never assigned in the obfuscated version.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # each vertex alone is a path of length 1

    # Count incoming edges of every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with all sources (no incoming edges).
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
# Name restored: `graph` was passed to longest_distance below but the
# obfuscated version assigned the literal to a throwaway name.
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 673 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node covering the inclusive index range [start, end] with aggregate ``val``.

    Name restored from the constructor calls inside SegmentTree._build_tree;
    attributes restored from their read sites (node.start/end/val/mid/left/right).
    """

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2  # split point used by range queries
        self.left = left
        self.right = right

    def __repr__(self):
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    """Segment tree over ``collection`` aggregated with the binary ``function``.

    Names restored: class name from the ``__main__`` call site, method names
    from their call sites (``self._build_tree`` etc., ``arr.update`` /
    ``arr.query_range`` / ``arr.traverse``), and the ``self.`` attribute
    assignments that the obfuscated version dropped.
    """

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection index ``i`` to ``val`` and refresh aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node's aggregate from its refreshed children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Names restored: `arr` and `fn` were read below but never assigned.
    for fn in [operator.add, max, min]:
        print('*' * 50)

        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 132 | """simple docstring"""
def infix_2_postfix(infix):
    """Convert an infix expression string to postfix, printing each step.

    Name restored from the call site in infix_2_prefix; ``stack``/``post_fix``/
    ``priority``/``print_width`` were read but never assigned in the
    obfuscated version. Also fixes a KeyError: when an operator arrived with
    "(" on top of the stack, ``priority["("]`` blew up — "(" now acts as a
    barrier, the standard shunting-yard behavior.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while (
                    len(stack) > 0
                    and stack[-1] != "("  # "(" is a barrier, never compared by priority
                    and priority[x] <= priority[stack[-1]]
                ):
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert an infix expression to prefix: reverse, swap parens, postfix, reverse.

    Name restored from the ``__main__`` call site; the obfuscated version
    assigned the reversed list to a throwaway name and then kept reading the
    untouched parameter.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # Name restored: `Infix` was read below but never assigned.
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 132 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
# Module logger; restored to `logger`, the name the dataset class below reads.
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments controlling which GLUE task/data to load and how to tokenize it.

    Class name restored from the annotation on GlueDataset; field names
    restored from their read sites (`args.task_name`, `args.data_dir`,
    `args.max_seq_length`, `args.overwrite_cache`). `overwrite_cache`'s
    default was an unresolved name — restored to False.
    """

    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )

    def __post_init__(self):
        # Task names are used as lowercase dict keys downstream.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names; ``.value`` is embedded in cache-file names.

    Name restored from the read sites (`Split.train`, `Split[mode]`,
    `mode == Split.dev` / `Split.test`); base restored to Enum, imported at
    the top of this file and required by the `Split[mode]` lookup.
    """

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """Deprecated torch Dataset of GLUE features, cached on disk per split.

    Restored: every ``self.`` assignment was dropped by the obfuscation even
    though ``self.features`` / ``self.processor`` / ``self.output_mode`` /
    ``self.label_list`` are read below; parameter names restored from their
    read sites; base restored to torch.utils.data.Dataset (imported above).
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args,
        tokenizer,
        limit_length=None,
        mode=Split.train,
        cache_dir=None,
    ):
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''',
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: calling the wrapped function returns its runtime in seconds.

    Restored: the obfuscated version never assigned ``starttime``/``delta``
    and dropped the ``wrapper.__name__`` copy.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result is discarded; only the timing matters
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` (index, example-dict) pairs of random dummy data
    matching the given ``datasets`` feature spec.

    Name restored from the call in the writer helper below; the obfuscated
    version never assigned the per-column example values. The unused
    ``enumerate`` counter was dropped.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature  # unwrap nested sequences down to the leaf feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples to an Arrow file and load it back
    as a ``datasets.Dataset``.

    NOTE(review): function name assumed from convention — the obfuscation
    erased it; locals restored from their read sites.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 29 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Names restored: all five module constants were assigned to `__a` (each
# overwriting the last) while the tokenizer class below reads them by their
# conventional names.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer restricted to a monolingual vocabulary file,
    with RoBERTa-style special tokens.

    Restored: every method signature had duplicate parameter names (a
    SyntaxError) and every ``self.`` attribute assignment was dropped even
    though ``self.sp_model`` / ``self.fairseq_tokens_to_ids`` /
    ``self.vocab_file`` etc. are read below. Parameter/attribute names
    restored from those read sites.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, """r""", encoding="""utf-8""") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # Drop the unpicklable C++ processor; keep its serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> or <s> A </s></s> B </s> (RoBERTa-style pair layout)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All zeros: this model does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: persist the in-memory SentencePiece model.
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, """w""", encoding="""utf-8""") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(token)} \n""")

        return out_vocab_file, out_monolingual_vocab_file
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Names restored: both constants were assigned to `__a`; the config class
# below reads `logger`, and the archive map follows the transformers
# convention for this model.
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    """Configuration for Transformer-XL.

    Restored: the ``__init__`` signature had every parameter named
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and all
    ``self.`` assignments were dropped; parameter/attribute names restored
    from the assignment order and defaults. Base restored to
    PretrainedConfig, imported at the top of this file.
    """

    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1E-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share projections for all clusters except the first when requested.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit."""
        )
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort ``list_data`` in place (and return it) with recursive bubble sort.

    Each pass bubbles the largest remaining element to position ``length-1``;
    recursion stops early when a pass makes no swap. Restored: ``length`` and
    ``swapped`` were read but never assigned in the obfuscated version.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 698 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 1_00,
) -> float:
    """Approximate the arc length of ``fnc`` on [x_start, x_end].

    The curve is replaced by ``steps`` straight segments whose lengths are
    summed. Name restored from the ``__main__`` call site; the obfuscation
    had destroyed every local assignment (``xa``/``fxa``).
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)

        # Increment step
        xa = xa_next
        fxa = fxa_next

    return length
if __name__ == "__main__":
    # Names restored: `f` and `i` were read below but never assigned.
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
| 698 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
# NOTE(review): both constants were assigned to `__a` (the second overwrote
# the first); restored to the conventional `datasets` metric names — confirm
# against the (out-of-view) metric class that consumes them.
_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
__a: int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    """Metric computing (root) mean squared error via `sklearn.metrics.mean_squared_error`."""

    def _info(self):
        """Declare metric metadata; feature types depend on the selected config."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        """Return the feature schema: per-example floats, or float sequences for "multilist"."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Compute MSE (or RMSE when ``squared=False``) of predictions vs. references."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
'''simple docstring'''
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return a sentence naming the day of the week for a ``mm-dd-yyyy`` /
    ``mm/dd/yyyy`` date, computed with Zeller's congruence.

    :param date_input: 10-character date string using ``-`` or ``/`` separators.
    :return: e.g. ``"Your date 01-01-2024, is a Monday!"``.
    :raises ValueError: on malformed input or out-of-range fields.
    :raises AssertionError: if the Zeller result disagrees with ``datetime``.
    """
    # Day number produced by Zeller's congruence (0 = Sunday) -> day name.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Maps datetime.weekday() (0 = Monday) onto Zeller's numbering (0 = Sunday).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('''Must be 10 characters long''')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''')
    # Get second separator
    sep_b = date_input[5]
    # Validate
    if sep_b not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: January/February count as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])          # century part of the year
    k = int(str(y)[2:])          # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against the standard library's calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__a: List[Any] = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
__a: List[Any] = parser.parse_args()
zeller(args.date_input)
| 428 | 0 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module logger; the pipeline below calls `logger.warning` on prompt truncation.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Holds the (optionally learned) text embeddings used for classifier-free
    sampling when no negative prompt embeddings are provided."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        """
        Args:
            learnable: whether the empty-prompt embeddings are trainable.
            hidden_size: embedding dimension; required when `learnable` is True.
            length: sequence length of the embeddings; required when `learnable` is True.
        """
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using VQ Diffusion.

    Components: a `VQModel` for encoding/decoding latent token maps, CLIP text
    encoder + tokenizer for the prompt, a `TransformeraDModel` denoiser, a
    `VQDiffusionScheduler`, and optional learned classifier-free-sampling
    embeddings used in place of empty-prompt embeddings.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: TransformeraDModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Tokenize and encode `prompt`, normalize the embeddings, and (when
        classifier-free guidance is on) prepend negative-prompt embeddings."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        """Run the full VQ-Diffusion sampling loop and decode the result."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # index of the all-"masked" codebook entry
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0) = -inf) the least likely classes whose
        cumulative probability exceeds `truncation_rate`, per latent pixel."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original class order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds a tiny ConvNext-backboned UperNet config plus random inputs for
    the model tests below (instantiated as `UperNetModelTester(self)`)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        # NOTE(review): the boolean flags below were lost in obfuscation;
        # use_auxiliary_head=True / auxiliary_concat_input=False match the
        # upstream transformers test — confirm against the original file.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for UperNet semantic segmentation.

    NOTE(review): class-attribute flag names and skipped-test method names were
    lost in obfuscation and are reconstructed from the upstream transformers
    test suite — confirm against the original file.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_training(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the ADE20k test fixture image and return it as an RGB PIL image."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests comparing pretrained UperNet logits against
    reference slices (Swin and ConvNext backbones)."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (negative values supported).

    Uses a rounded float cube root so that e.g. 64 is recognised despite
    ``64 ** (1 / 3)`` being slightly below 4 in floating point.
    """
    if n == 0:
        return True
    root = round(abs(n) ** (1 / 3))
    return root ** 3 == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    r"""
    Configuration class for VAN (Visual Attention Network) models.

    Stores patching/stage geometry, hidden sizes, and regularization rates.
    Instantiating with the defaults yields a configuration similar to the
    van-base architecture referenced in the archive map above.
    """

    # `model_type` is the PretrainedConfig registry key.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with the timm backbone keys remapped.

    Keys containing ``backbone.0.body`` are moved under the HF
    ``backbone.conv_encoder.model`` namespace; all other entries are copied
    unchanged, preserving insertion order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused attention projection into q/k/v.

    PyTorch's MultiHeadAttention stores query/key/value projections as a
    single ``in_proj`` matrix + bias; the HF model expects three separate
    projections of hidden size 256 each (so the fused tensors have leading
    dimension 768). Mutates ``state_dict`` in place.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a torch-hub Conditional DETR checkpoint into the HF
    ConditionalDetr structure, verify outputs match, push to the hub, and
    save model + image processor to ``pytorch_dump_folder_path``.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    # NOTE(review): the original loaded the detection label map unconditionally
    # (even for panoptic checkpoints) — behavior preserved here.
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=image_format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): strip the "conditional_detr" prefix and reinsert
                # under "conditional_detr.model" — confirm against a real
                # panoptic checkpoint's key layout.
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 636 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_(SchedulerCommonTest):
    """Unit tests for DDIMParallelScheduler.

    The original version bound every method to one mangled name (so all but
    the last were shadowed) and both class attributes to one name; method and
    attribute names here follow the SchedulerCommonTest conventions.
    """

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a complete deterministic denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # expected variances taken from the reference implementation
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.1_47_71)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.3_24_60)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.0_09_79)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 11_47.79_04) < 1e-2
        assert abs(result_mean.item() - 0.49_82) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_72.00_67) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.53_02) < 1e-2
        assert abs(result_mean.item() - 0.06_84) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.82_95) < 1e-2
        assert abs(result_mean.item() - 0.19_51) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.07_84) < 1e-2
        assert abs(result_mean.item() - 0.19_41) < 1e-3
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for TransfoXLTokenizer.

    The original version bound all three class attributes and every method to
    a single mangled name (shadowing them); names here follow the
    TokenizerTesterMixin conventions.
    """

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        # Force lowercasing so all tests exercise the lower-cased vocab above.
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_punctuation(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-imported module structure: each optional-dependency branch registers
# its submodule's public names. (The original rebound one variable for every
# branch — discarding all earlier entries — and never defined the
# `_import_structure` dict that is handed to `_LazyModule` below.)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_rembert'] = [
        'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RemBertForCausalLM',
        'RemBertForMaskedLM',
        'RemBertForMultipleChoice',
        'RemBertForQuestionAnswering',
        'RemBertForSequenceClassification',
        'RemBertForTokenClassification',
        'RemBertLayer',
        'RemBertModel',
        'RemBertPreTrainedModel',
        'load_tf_weights_in_rembert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_rembert'] = [
        'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRemBertForCausalLM',
        'TFRemBertForMaskedLM',
        'TFRemBertForMultipleChoice',
        'TFRemBertForQuestionAnswering',
        'TFRemBertForSequenceClassification',
        'TFRemBertForTokenClassification',
        'TFRemBertLayer',
        'TFRemBertModel',
        'TFRemBertPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this package so attribute access
    # triggers on-demand imports (the original assigned it to a throwaway
    # variable, leaving the real module unmodified).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def solution(a_: int = 1_0_0_0) -> int:
    """Return the sum of all natural numbers below ``a_`` divisible by 3 or 5.

    Project Euler problem 1. The original loop carried an unreachable
    ``elif a % 15 == 0`` branch (any multiple of 15 is already a multiple of
    3, so the first condition always wins); the straightforward sum below is
    equivalent.

    >>> solution(10)
    23
    """
    return sum(k for k in range(a_) if k % 3 == 0 or k % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 55 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Any=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=99 , lowerCAmelCase : Optional[int]=36 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=4 , lowerCAmelCase : int=37 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[Any]=5_12 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Tuple=6 , lowerCAmelCase : Tuple=6 , lowerCAmelCase : Dict=3 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Any]=10_00 , ) -> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = coordinate_size
lowercase__ = shape_size
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase__ = text_seq_length
lowercase__ = (image_size // patch_size) ** 2 + 1
lowercase__ = self.text_seq_length + self.image_seq_length
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
lowercase__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__ = bbox[i, j, 3]
lowercase__ = bbox[i, j, 1]
lowercase__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__ = bbox[i, j, 2]
lowercase__ = bbox[i, j, 0]
lowercase__ = tmp_coordinate
lowercase__ = tf.constant(lowerCAmelCase)
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.text_seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
lowercase__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
    """Instantiate a base TFLayoutLMvaModel and verify the last_hidden_state
    shape for text+image, text-only and image-only inputs.

    The original parameter names were lost; they are restored from the
    keyword arguments used in the body and the caller (test_model).
    """
    model = TFLayoutLMvaModel(config=config)

    # text + image
    result = model(input_ids, pixel_values=pixel_values, training=False)
    result = model(
        input_ids,
        bbox=bbox,
        pixel_values=pixel_values,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        training=False,
    )
    result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    # text only
    result = model(input_ids, training=False)
    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

    # image only
    result = model({'pixel_values': pixel_values}, training=False)
    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
def create_and_check_for_sequence_classification(
    self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
):
    """Check TFLayoutLMvaForSequenceClassification produces (batch, num_labels) logits."""
    config.num_labels = self.num_labels
    model = TFLayoutLMvaForSequenceClassification(config=config)
    result = model(
        input_ids,
        bbox=bbox,
        pixel_values=pixel_values,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
        training=False,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
    self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
):
    """Check TFLayoutLMvaForTokenClassification produces per-text-token logits."""
    config.num_labels = self.num_labels
    model = TFLayoutLMvaForTokenClassification(config=config)
    result = model(
        input_ids,
        bbox=bbox,
        pixel_values=pixel_values,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        training=False,
    )
    # Only the text tokens carry labels, hence text_seq_length (not seq_length).
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
def create_and_check_for_question_answering(
    self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
):
    """Check TFLayoutLMvaForQuestionAnswering produces start/end logits over the full sequence."""
    config.num_labels = 2  # QA heads always predict start/end, i.e. 2 labels
    model = TFLayoutLMvaForQuestionAnswering(config=config)
    result = model(
        input_ids,
        bbox=bbox,
        pixel_values=pixel_values,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
        training=False,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the layout the common test mixin expects."""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = config_and_inputs
    inputs_dict = {
        'input_ids': input_ids,
        'bbox': bbox,
        'pixel_values': pixel_values,
        'token_type_ids': token_type_ids,
        'attention_mask': input_mask,
    }
    return config, inputs_dict
@require_tf
class UpperCAmelCase__(lowerCamelCase, unittest.TestCase):
    """TF LayoutLMv3 model tests.

    NOTE(review): the original class statement listed the same base name twice
    (two distinct mixins — presumably TFModelTesterMixin and PipelineTesterMixin —
    were collapsed to one identifier, and Python forbids duplicate bases), and
    every class attribute / method shared a single generated name.  Attribute and
    method names below are restored from the visible call sites where possible
    and from the upstream transformers test file otherwise — confirm against
    this file's imports.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Standard tester switches; the three distinct flag names were lost — TODO confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip every pipeline test for this model.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        """Deep-copy inputs_dict and, when return_labels is set, add dummy labels
        shaped for model_class.

        NOTE(review): the TF_MODEL_FOR_*_MAPPING constants were erased by the
        renaming; they are reconstructed from the upstream test — confirm.
        """
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile every non-scalar tensor along a new num_choices axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    The generated name `_lowerCAmelCase` is restored to `prepare_img`, which is
    what the integration test below actually calls, and the result is bound to
    the `image` name it returns (previously a NameError).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class UpperCAmelCase__(unittest.TestCase):
    """Slow integration test checking TFLayoutLMva against reference logits."""

    @cached_property
    def default_image_processor(self):
        # Name restored from the `self.default_image_processor` access below.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        # NOTE(review): the test method name was lost; restored from the upstream suite.
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 622 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the second generated assignment previously clobbered it.
logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL (standard HF archive-map layout).
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
        'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
    ),
}
class __magic_name__(PretrainedConfig):
    """Configuration for a GPTSAN-japanese model.

    NOTE(review): the generated source collapsed every __init__ parameter to a
    single name (a SyntaxError) and the three class attributes to one; they are
    restored here from the assignment order, the default values and the
    standard PretrainedConfig layout — confirm against the upstream
    GPTSanJapaneseConfig.  The base class is restored to the PretrainedConfig
    imported at the top of this file.
    """

    model_type = '''gptsan-japanese'''
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        vocab_size=3_6000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1E-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.0_02,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=3_5998,
        pad_token_id=3_5995,
        eos_token_id=3_5999,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the switch stack plus the extra layers on top of it.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 272 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
_lowercase: Tuple = [True] * (num + 1)
_lowercase: List[str] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , _UpperCamelCase ):
_lowercase: List[str] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
    # Run the doctests embedded in this module before the interactive demo.
    import doctest

    doctest.testmod()

    # The bogus `List[Any]` annotation is dropped: `List` was never imported,
    # so the module-level annotation raised a NameError.
    A__ = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(A__))
| 272 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __snake_case(DeiTImageProcessor):
    """Deprecated alias for DeiTImageProcessor, kept for backward compatibility.

    The base class is restored to the DeiTImageProcessor imported above (the
    generated base name was undefined), and *args/**kwargs replace the
    duplicated parameter names, which were a SyntaxError.
    """

    def __init__(self, *args, **kwargs):
        # Warn at construction; the alias is slated for removal in Transformers v5.
        # NOTE(review): the warning category was lost — FutureWarning matches the
        # upstream deprecation, confirm.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 388 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """Iterable dataset yielding 0, 1, 2, ... with a random early stop.

    After each yielded item, iteration stops with probability ``p_stop``
    (driven by the global ``random`` module, so seeding makes it
    deterministic), or after ``max_length`` items at the latest.

    The class name is restored from its use in the tests below; the base is
    the IterableDataset imported at the top of this file. The original
    __init__ had duplicated parameter names and never stored its arguments.
    """

    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class __snake_case(unittest.TestCase):
    """Tests for BatchSamplerShard, IterableDatasetShard and the skip/end-of-
    dataloader helpers.

    NOTE(review): test-method names, the loop index passed to
    BatchSamplerShard, and every True/False argument were erased by the
    renaming.  They are reconstructed from the expected literals (a padded
    expectation implies drop_last=False, a truncated one drop_last=True) and
    from the upstream accelerate test suite — confirm.
    """

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        # Shard across 2 processes and compare each shard with `expected`.
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # The reported shard length must match the materialized one.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        # Seed before materializing the reference so the random dataset is reproducible.
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 388 | 1 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directories the conversion functions below write into,
# so torch.save / json.dump do not fail on a missing path.
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal-UNet checkpoint (horizon `hor`) to the diffusers format.

    Loads the original ``temporal_unet-hopper-mediumv2-hor{hor}.torch`` checkpoint,
    maps its parameter names positionally onto a freshly configured diffusers model,
    and writes the converted weights plus the model config under
    ``hub/hopper-medium-v2/unet/hor{hor}/``.

    Restored naming: the mangled original named both the function and its parameter
    ``snake_case`` while the body and the ``__main__`` block read ``hor``/``unet``,
    and every local was assigned to one placeholder name.

    Args:
        hor: planning horizon of the checkpoint; only 32 and 128 are supported.

    Raises:
        ValueError: if `hor` is neither 32 nor 128.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    else:
        # Previously an unsupported horizon fell through to unbound locals.
        raise ValueError(f"Unsupported horizon {hor}; expected 32 or 128.")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Parameter order matches between the two models, so map names positionally.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser value-function checkpoint to the diffusers format.

    Loads the hor-32 value-function checkpoint, remaps its parameter names onto a
    freshly configured diffusers model, and writes weights + config under
    ``hub/hopper-medium-v2/value_function/``.

    Restored naming: the mangled original was also called ``snake_case`` (shadowing
    the UNet converter) while the ``__main__`` block calls ``value_function()``.
    """
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # The value-function checkpoint is saved as a raw state dict (no wrapping
    # module), so it is used directly — assumption from the original script.
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Parameter order matches between the two models, so map names positionally.
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
    # Preserve the old (mangled) module-level name for backward compatibility: in the
    # original file this second definition was the final binding of `snake_case`.
    globals()["snake_case"] = value_function
if __name__ == "__main__":
    # Entry point: convert the horizon-32 UNet (hor-128 kept available but
    # disabled) and the value function.
    # NOTE(review): this expects `unet` and `value_function` to be defined above;
    # the definitions in this file are both named `snake_case`, which looks
    # mangled — confirm the definition names match these call sites.
    unet(32)
    # unet(128)
    value_function()
| 514 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
# Metric metadata. Restored canonical names: the mangled original assigned all
# three strings to `_UpperCamelCase`, while the decorator and `_info()` below read
# `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`.
_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Perplexity of input texts under a causal language model.

    Restored method names: ``datasets.Metric`` dispatches on ``_info``/``_compute``;
    the mangled original gave both methods the same placeholder name and destroyed
    every local variable name.
    """

    def _info(self):
        # datasets.Metric hook: declares the metric's input schema and docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        """Compute per-text and mean perplexity of `input_texts` under `model_id`."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                # Prepend BOS to every sequence in the batch, extending the mask to match.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            # Next-token prediction: align logits[t] with labels[t+1] and mask padding.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            # `expa`/`intaa` were digit-mangled names; restored to exp2/int64.
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 514 | 1 |
def UpperCAmelCase(min_val=10, max_val=1000, option=True):
    """Return `min_val` when `option` is truthy, else `max_val`, after validating the range.

    The mangled original declared the same parameter name three times (a
    SyntaxError) and compared each value against itself in ``isinstance``; the
    parameter names are restored from the body's own references. The function
    name itself is a mangled placeholder — kept for backward compatibility.

    Args:
        min_val: lower bound of the range.
        max_val: upper bound of the range.
        option: selector — True picks the minimum, False the maximum.

    Returns:
        int: the selected bound.

    Raises:
        TypeError: if any argument has the wrong type (was an `assert`, which
            disappears under ``python -O``).
        ValueError: if min_val > max_val.
    """
    if not (isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)):
        raise TypeError("Invalid type of value(s) specified to function!")
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1, number_2):
    """Return the integer midpoint of two numbers (fractional part truncated toward zero).

    Renamed from the mangled placeholder: the search loop below calls `get_avg`.
    """
    return int((number_1 + number_2) / 2)
def guess_the_number(lower, higher, to_guess):
    """Binary-search demo that "guesses" `to_guess` strictly inside (lower, higher).

    Prints each intermediate guess; the last printed guess equals `to_guess`.
    Renamed from the mangled placeholder: `main()` below calls `guess_the_number`.

    Args:
        lower: exclusive lower bound of the search range.
        higher: exclusive upper bound of the search range.
        to_guess: target value to converge on.

    Raises:
        TypeError: if any argument is not an int (was an `assert`, which
            disappears under ``python -O``).
        ValueError: if lower > higher, or to_guess lies outside (lower, higher).
    """
    if not (isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)):
        raise TypeError('argument values must be type of "int"')
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        # Classify a candidate relative to the target.
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        # Bisect the current bracket until the midpoint hits the target.
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main():
    """Interactively read lower/higher/target values and run the guessing demo.

    Renamed from the mangled placeholder: the ``__main__`` guard calls ``main()``.
    """
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
    # NOTE(review): expects a `main` entry point above; the definitions in this
    # file are all named `UpperCAmelCase`, which looks mangled — confirm the name.
    main()
| 55 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid="isbn/0140328726"):
    """Fetch a JSON record from the Open Library API for the given olid path.

    Renamed from the mangled placeholder: the interactive loop below and
    `summarize_book` call `get_openlibrary_data`.

    Args:
        olid: an Open Library path such as "isbn/0140328726" or "authors/OL34184A".

    Returns:
        dict: the decoded JSON response.

    Raises:
        ValueError: if `olid` does not contain exactly one '/' separator.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    # A timeout keeps the interactive loop below from hanging on a dead connection.
    return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json()
def summarize_book(ol_book_data):
    """Map an Open Library book record onto a small human-readable summary dict.

    Renamed from the mangled placeholder (the ``__main__`` loop calls
    ``summarize_book``); the original also read `ol_book_data` while its only
    parameter was named `a_`, and lost the assignment targets in the loop.

    Args:
        ol_book_data: raw book record as returned by the Open Library API.

    Returns:
        dict: title, publish date, resolved author names, page count, first
        sentence and ISBNs, with list values joined into comma-separated strings.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry is a reference; resolve it to a display name via the API.
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive loop: look up ISBNs until the user quits. The mangled original
    # assigned the input to a throwaway name but then read `isbn`.
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # NOTE(review): isdigit() rejects valid ISBN-10 codes whose check digit is 'X'.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 55 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase(lowerCAmelCase__, unittest.TestCase):
    """Tokenizer test-suite specialization for OpenAI GPT (slow + fast tokenizers).

    Restored names: the class attributes the tokenizer-test mixin reads and the
    unittest hook/test method names were all mangled to single shadowing
    placeholders, so no test was discoverable.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        # The mixin expects these paths on `self`; the mangled original dropped them.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Round-trip fixture used by the mixin's encode/decode tests.
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # GPT has no pad token, so every padding request must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token, so the generic mixin padding test does not apply.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase__ ):
    # NOTE(review): this class reuses the name of the test class above and shadows
    # it in the module namespace — both names look mangled; confirm the intended
    # distinct class names. Runs the inherited suite under ftfy + spaCy.
    pass
| 709 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (bound to a mangled placeholder name; unused in this excerpt).
UpperCamelCase : Tuple = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
UpperCamelCase : List[str] = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class __lowerCAmelCase(PretrainedConfig):
    r"""Configuration for EfficientFormer models.

    Stores the hyper-parameters of an EfficientFormer architecture. Restored
    names: the mangled original inherited from an undefined placeholder (the
    file imports ``PretrainedConfig``), named the ``model_type`` attribute
    ``lowercase``, and declared every ``__init__`` parameter with the same name
    (a SyntaxError); parameter names are recovered from the attribute
    assignments in the body.
    """

    # AutoConfig uses `model_type` to map config JSON back to this class.
    model_type = "efficientformer"

    def __init__(
        self,
        depths=[3, 2, 6, 4],  # mutable defaults kept for interface compatibility
        hidden_sizes=[48, 96, 224, 448],
        downsamples=[True, True, True, True],
        dim=448,
        key_dim=32,
        attention_ratio=4,
        resolution=7,
        num_hidden_layers=5,
        num_attention_heads=8,
        mlp_expansion_ratio=4,
        hidden_dropout_prob=0.0,
        patch_size=16,
        num_channels=3,
        pool_size=3,
        downsample_patch_size=3,
        downsample_stride=2,
        downsample_pad=1,
        drop_path_rate=0.0,
        num_meta3d_blocks=1,
        distillation=True,
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        batch_norm_eps=1e-05,
        **kwargs,
    ):
        """Store all architecture hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        # `num_metaad_blocks` was a digit-mangled spelling of num_meta3d_blocks.
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 293 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowercase(unittest.TestCase):
    """Fast (CPU-friendly) smoke tests for the ScoreSdeVe pipeline.

    Restored names: the property was referenced as ``self.dummy_uncond_unet``
    but defined under a mangled name, and the test method's locals/arguments
    were undefined placeholders.
    """

    @property
    def dummy_uncond_unet(self):
        # Tiny UNet; the fixed seed pins the random initialization.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        # Re-seed so the tuple-return path produces the identical sample.
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _lowercase(unittest.TestCase):
    """Slow integration test against the hosted ncsnpp-church-256 checkpoint."""
    # NOTE(review): this class shares the (mangled) name of the fast-test class
    # above and shadows it at module scope — confirm the intended distinct names.

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 203 |
__lowercase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation):
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm.

    Renamed from the mangled placeholder: the ``__main__`` block calls
    ``dijkstras_two_stack_algorithm``.

    Args:
        equation: expression string where every binary operation is
            parenthesized, e.g. "(5 + ((4 * 2) * (2 + 3)))"; single-digit
            operands only (each digit character is pushed individually).

    Returns:
        The value of the expression (int, or float when '/' is used).
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the left operand (matters for - and /).
            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: '(' (and spaces) are ignored.
    # RULE 5: the final result is the only value left on the operand stack.
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: the mangled original assigned the expression to a placeholder name
    # while the print statement read `equation`.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 203 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Keep the name defined so `slow_tokenizer_class` below works without sentencepiece.
    # (The mangled original bound the fallback to a placeholder name instead.)
    BigBirdTokenizer = None

# Module logger; the save_vocabulary error path below uses it.
logger = logging.get_logger(__name__)
# Restored canonical constant names: the mangled original assigned all four to
# `__magic_name__` (each shadowing the last), while the tokenizer class below
# reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class A__(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace *tokenizers*.

    Restored names: the mangled original inherited from an undefined placeholder
    (the file imports ``PreTrainedTokenizerFast``), gave all six class attributes
    the same name, mangled the standard tokenizer method names, and reduced every
    parameter to ``_A``. Special-token layout is ``[CLS] seq [SEP]`` /
    ``[CLS] a [SEP] b [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain strings so stripping behaviour is explicit for each token.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the original spiece.model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one or two sequences of ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model file into `save_directory`; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Only copy when the target differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 711 |
import argparse
import os
import re
import packaging.version
# Restored canonical constant names: the mangled original assigned all four to
# `__magic_name__`, while every helper below reads PATH_TO_EXAMPLES,
# REPLACE_PATTERNS, REPLACE_FILES and README_FILE.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (regex locating the version line, replacement template with VERSION placeholder)
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file where the version must be updated
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex/template pair REPLACE_PATTERNS[pattern].

    Restored naming: the mangled original declared the same parameter name three
    times (a SyntaxError) and was named `lowercase__`, while callers use
    `update_version_in_file`.

    Args:
        fname: file to rewrite in place.
        version: new version string substituted for the VERSION placeholder.
        pattern: key into REPLACE_PATTERNS selecting regex and replacement.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Apply the 'examples' version pattern to every .py file under PATH_TO_EXAMPLES.

    Restored naming: the mangled original walked its own `version` argument
    instead of PATH_TO_EXAMPLES and shadowed the other helpers' name.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (library files, plus examples unless a patch)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        # Patch releases happen on a release branch; examples there track that branch.
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-list doc links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current library version out of the package __init__.

    Returns:
        packaging.version.Version: the parsed current version.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    # Group 0 of the 'init' pattern captures the quoted version string.
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def lowercase__ ( patch=False) -> str:
    """Pre-release work: compute the next version, confirm it interactively,
    and rewrite it across the repo (and examples, unless this is a patch).

    Fix: the body read ``patch``/``default_version``/``version`` which were
    never bound (locals went to ``UpperCamelCase``); bindings restored, and the
    parameter renamed to ``patch`` to match the keyword call site.
    NOTE(review): call sites refer to this as ``pre_release_work``.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version , patch=patch)
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.')
        clean_main_ref_in_model_list()
def lowercase__ ( ) -> int:
    """Post-release work: switch the repo back to a ``.dev0`` version, asking
    the user for confirmation first.

    Fix: the body read ``current_version``/``dev_version``/``version`` which
    were never bound (locals went to ``UpperCamelCase``); bindings restored.
    NOTE(review): call sites refer to this as ``post_release_work``.
    """
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
    print('Cleaning main README, don\'t forget to run `make fix-copies`.')
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to ``__magic_name__`` while
    # the following lines read ``parser``/``args`` (NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    # NOTE(review): ``pre_release_work``/``post_release_work`` are the upstream
    # names of the ``lowercase__`` functions above -- confirm the definitions
    # carry these names (or aliases) before running this script.
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 410 | 0 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__snake_case = logging.get_logger(__name__)
def _lowerCamelCase ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict ):
def run_func(lowerCamelCase__ : int ):
@wraps(lowerCamelCase__ )
def run_in_eager_mode(*lowerCamelCase__ : Any , **lowerCamelCase__ : Union[str, Any] ):
return func(*lowerCamelCase__ , **lowerCamelCase__ )
@wraps(lowerCamelCase__ )
@tf.function(experimental_compile=lowerCamelCase__ )
def run_in_graph_mode(*lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[str] ):
return func(*lowerCamelCase__ , **lowerCamelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _lowerCamelCase ( batch_size , sequence_length , vocab_size ):
    """Build a random batch of token ids of shape (batch_size, sequence_length)
    with values in ``[0, vocab_size)``.

    Fix: the original declared three parameters with one mangled name (a
    SyntaxError) while the body read ``rng``/``batch_size``/``sequence_length``/
    ``vocab_size``; names restored.  The mangled dtype ``tf.intaa`` is assumed
    to be ``tf.int32`` -- TODO confirm against upstream.
    """
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """TensorFlow benchmark backend: builds inference/training closures for a
    model config and measures their speed and peak memory.

    NOTE(review): identifiers look machine-mangled throughout -- locals are
    bound to ``lowercase__`` but read under their upstream names, and
    ``UpperCamelCase_`` / ``run_with_tf_optimizations`` are never bound here.
    Compare with the upstream ``transformers.benchmark.benchmark_tf`` module
    before trusting behavior.
    """

    # NOTE(review): three annotations/assignments on the same name ``_a`` --
    # upstream these are ``args``, ``configs`` and ``framework``; only the
    # last one survives as written.
    _a : TensorFlowBenchmarkArguments
    _a : PretrainedConfig
    _a : str = "TensorFlow"

    @property
    def UpperCAmelCase__( self ) -> Optional[int]:
        # Framework version string reported alongside benchmark results.
        return tf.__version__

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
        # Measure inference speed; requires an initialized device strategy.
        # initialize GPU on separate process
        lowercase__ : Dict = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        lowercase__ : int = self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        return self._measure_speed(_inference )

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
        # Measure training (forward + backward) speed.
        lowercase__ : Optional[int] = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        lowercase__ : Tuple = self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        return self._measure_speed(_train )

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
        # Measure inference peak memory.
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
        lowercase__ : Any = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        lowercase__ : Any = self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        return self._measure_memory(_inference )

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
        # Measure training peak memory.
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
        lowercase__ : List[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        lowercase__ : Optional[int] = self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        return self._measure_memory(_train )

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
        # Build and return the inference closure for the given model name.
        lowercase__ : Optional[Any] = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        lowercase__ : Dict = (
            hasattr(UpperCamelCase_ , """architectures""" )
            and isinstance(config.architectures , UpperCamelCase_ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                lowercase__ : Union[str, Any] = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                lowercase__ : int = __import__("""transformers""" , fromlist=[model_class] )
                lowercase__ : Dict = getattr(UpperCamelCase_ , UpperCamelCase_ )
                lowercase__ : Optional[Any] = model_cls(UpperCamelCase_ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            lowercase__ : List[str] = TF_MODEL_MAPPING[config.__class__](UpperCamelCase_ )
        # encoder-decoder has vocab size saved differently
        lowercase__ : Any = config.vocab_size if hasattr(UpperCamelCase_ , """vocab_size""" ) else config.encoder.vocab_size
        lowercase__ : List[str] = random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , training=UpperCamelCase_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(UpperCamelCase_ , training=UpperCamelCase_ )

        lowercase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
        # Build and return the training closure (computes gradients).
        lowercase__ : Union[str, Any] = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        lowercase__ : Tuple = (
            hasattr(UpperCamelCase_ , """architectures""" )
            and isinstance(config.architectures , UpperCamelCase_ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                lowercase__ : int = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                lowercase__ : List[Any] = __import__("""transformers""" , fromlist=[model_class] )
                lowercase__ : Any = getattr(UpperCamelCase_ , UpperCamelCase_ )
                lowercase__ : Union[str, Any] = model_cls(UpperCamelCase_ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            lowercase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase_ )
        # encoder-decoder has vocab size saved differently
        lowercase__ : Tuple = config.vocab_size if hasattr(UpperCamelCase_ , """vocab_size""" ) else config.encoder.vocab_size
        lowercase__ : str = random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            lowercase__ : Union[str, Any] = model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
            lowercase__ : int = tf.gradients(UpperCamelCase_ , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            lowercase__ : str = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
            lowercase__ : Tuple = tf.gradients(UpperCamelCase_ , model.trainable_variables )
            return gradients

        lowercase__ : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def UpperCAmelCase__( self , lowerCamelCase__ ) -> Union[str, Any]:
        # Time the closure; returns the best per-iteration time (min / number).
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(UpperCamelCase_ , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                lowercase__ : Tuple = timeit.repeat(
                    UpperCamelCase_ , repeat=self.args.repeat , number=10 , )
                return min(UpperCamelCase_ ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )

    def UpperCAmelCase__( self , lowerCamelCase__ ) -> Tuple:
        # Measure peak memory of the closure; returns (memory, trace summary).
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    lowercase__ : Optional[int] = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        lowercase__ : Any = "N/A"
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        lowercase__ : List[Any] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        lowercase__ : Tuple = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase_ )
                        lowercase__ : Union[str, Any] = meminfo.used
                        lowercase__ : int = Memory(UpperCamelCase_ )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        lowercase__ : str = None
                    else:
                        lowercase__ : Optional[int] = measure_peak_memory_cpu(UpperCamelCase_ )
                        lowercase__ : Optional[Any] = Memory(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    lowercase__ : List[Any] = stop_memory_tracing(UpperCamelCase_ )
                    if memory is None:
                        lowercase__ : Any = summary.total
                else:
                    lowercase__ : Optional[Any] = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __A :
    """Test helper that builds a tiny Flaubert config plus random inputs and
    runs shape checks for each TF Flaubert head.

    NOTE(review): identifiers look machine-mangled -- locals are bound to
    ``__UpperCAmelCase`` but read under their upstream names (``config``,
    ``input_ids``, ...), and ``__init__``'s parameter is ``UpperCamelCase_``
    while the body reads ``parent``.  Compare with the upstream
    ``transformers`` Flaubert TF test before trusting behavior.
    """

    def __init__( self , UpperCamelCase_ , ):
        # Hyper-parameters for a deliberately tiny model (fast unit tests).
        __UpperCAmelCase : Any = parent
        __UpperCAmelCase : Dict = 13
        __UpperCAmelCase : Tuple = 7
        __UpperCAmelCase : List[Any] = True
        __UpperCAmelCase : Tuple = True
        __UpperCAmelCase : Optional[Any] = True
        __UpperCAmelCase : Optional[Any] = True
        __UpperCAmelCase : List[Any] = True
        __UpperCAmelCase : Optional[int] = False
        __UpperCAmelCase : Union[str, Any] = False
        __UpperCAmelCase : List[Any] = False
        __UpperCAmelCase : Union[str, Any] = 2
        __UpperCAmelCase : Dict = 99
        __UpperCAmelCase : Dict = 0
        __UpperCAmelCase : List[Any] = 32
        __UpperCAmelCase : Any = 2
        __UpperCAmelCase : str = 4
        __UpperCAmelCase : List[Any] = 0.1
        __UpperCAmelCase : Optional[int] = 0.1
        __UpperCAmelCase : Union[str, Any] = 5_12
        __UpperCAmelCase : int = 16
        __UpperCAmelCase : List[Any] = 2
        __UpperCAmelCase : int = 0.0_2
        __UpperCAmelCase : Optional[int] = 3
        __UpperCAmelCase : List[str] = 4
        __UpperCAmelCase : List[Any] = "last"
        __UpperCAmelCase : List[str] = True
        __UpperCAmelCase : str = None
        __UpperCAmelCase : Any = 0

    def _snake_case ( self ):
        # Build random input tensors and a tiny FlaubertConfig.
        __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        __UpperCAmelCase : Union[str, Any] = None
        if self.use_input_lengths:
            __UpperCAmelCase : str = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        __UpperCAmelCase : Dict = None
        if self.use_token_type_ids:
            __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __UpperCAmelCase : Union[str, Any] = None
        __UpperCAmelCase : Any = None
        __UpperCAmelCase : Tuple = None
        if self.use_labels:
            __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCAmelCase : str = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # NOTE(review): the following checker methods each declare nine parameters
    # with the same mangled name (a SyntaxError as written upstream of this
    # review); the bodies read ``input_ids``/``input_lengths``/etc.
    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # Base model: check last_hidden_state shape.
        __UpperCAmelCase : Dict = TFFlaubertModel(config=UpperCamelCase_ )
        __UpperCAmelCase : int = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        __UpperCAmelCase : List[str] = model(UpperCamelCase_ )
        __UpperCAmelCase : Union[str, Any] = [input_ids, input_mask]
        __UpperCAmelCase : List[Any] = model(UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # LM head: check logits shape.
        __UpperCAmelCase : Dict = TFFlaubertWithLMHeadModel(UpperCamelCase_ )
        __UpperCAmelCase : Tuple = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        __UpperCAmelCase : Dict = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # QA head: check start/end logits shapes.
        __UpperCAmelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
        __UpperCAmelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
        __UpperCAmelCase : Tuple = model(UpperCamelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # Sequence classification head: check logits shape.
        __UpperCAmelCase : Tuple = TFFlaubertForSequenceClassification(UpperCamelCase_ )
        __UpperCAmelCase : List[Any] = {"input_ids": input_ids, "lengths": input_lengths}
        __UpperCAmelCase : str = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # Token classification head: check per-token logits shape.
        __UpperCAmelCase : Optional[int] = self.num_labels
        __UpperCAmelCase : Dict = TFFlaubertForTokenClassification(config=UpperCamelCase_ )
        __UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __UpperCAmelCase : Tuple = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
        # Multiple choice head: tile inputs across choices, check logits shape.
        __UpperCAmelCase : Tuple = self.num_choices
        __UpperCAmelCase : Optional[int] = TFFlaubertForMultipleChoice(config=UpperCamelCase_ )
        __UpperCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        __UpperCAmelCase : Optional[int] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        __UpperCAmelCase : List[str] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
        __UpperCAmelCase : Optional[Any] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __UpperCAmelCase : Tuple = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _snake_case ( self ):
        # Split prepared config/inputs into (config, inputs_dict) for common tests.
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        (
            (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) ,
        ) : Optional[int] = config_and_inputs
        __UpperCAmelCase : str = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
# NOTE(review): base classes are the undefined name ``__magic_name__`` --
# upstream these are ``TFModelTesterMixin`` and ``PipelineTesterMixin``.
# All test methods share the mangled name ``_snake_case``, so as written only
# the last definition would survive; upstream each has a distinct ``test_*``
# name.
class __A (__magic_name__ , __magic_name__ , unittest.TestCase ):
    # Model classes exercised by the common model tests.
    snake_case :List[str] = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    snake_case :List[str] = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # Mapping from pipeline task name to the model class used for pipeline tests.
    snake_case :Optional[Any] = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case :Tuple = False
    snake_case :Any = False

    def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        # Skip flaky QA pipeline tests when a slow tokenizer is used.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _snake_case ( self ):
        # setUp: build the model tester and the config tester.
        __UpperCAmelCase : List[str] = TFFlaubertModelTester(self )
        __UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 )

    def _snake_case ( self ):
        self.config_tester.run_common_tests()

    def _snake_case ( self ):
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )

    def _snake_case ( self ):
        __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )

    def _snake_case ( self ):
        __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )

    def _snake_case ( self ):
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )

    def _snake_case ( self ):
        __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*UpperCamelCase_ )

    def _snake_case ( self ):
        __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*UpperCamelCase_ )

    @slow
    def _snake_case ( self ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : Tuple = TFFlaubertModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class __A (unittest.TestCase ):
    """Slow integration test: run a pretrained Flaubert checkpoint on a fixed
    sentence and compare a slice of the output to reference values."""

    # NOTE(review): the method name does not start with ``test_`` so unittest
    # will not collect it; kept as-is to preserve the interface.
    @slow
    def _snake_case ( self ):
        """Check last_hidden_state shape and a 3x3 slice against references.

        Fix: the original bound every intermediate to ``__UpperCAmelCase`` and
        then read undefined names (``model``, ``output``, ``UpperCamelCase_``);
        coherent locals restored.  Mangled dtypes ``tf.intaa``/``tf.floataa``
        are assumed to be ``tf.int32``/``tf.float32`` -- TODO confirm.
        """
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        input_ids = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
                    [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
                    [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 168 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make all subsequent test runs deterministic (seeded RNGs, deterministic ops).
enable_full_determinism()
# NOTE(review): the class inherits from its own (undefined-at-this-point)
# mangled name -- upstream the bases are ``PipelineLatentTesterMixin`` and
# ``PipelineTesterMixin``.  Method bodies also read the undefined name
# ``_lowercase`` where upstream passes devices/seeds/flags; compare with the
# upstream diffusers SDXL img2img test before trusting behavior.
class __snake_case( __snake_case , __snake_case , unittest.TestCase ):
    '''Fast (tiny-model) tests for StableDiffusionXLImgaImgPipeline.'''
    _UpperCAmelCase = StableDiffusionXLImgaImgPipeline
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"}
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    _UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _a ( self ):
        '''Build tiny, seeded model components for a fast pipeline test.'''
        torch.manual_seed(0 )
        __A : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        __A : Dict = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
        torch.manual_seed(0 )
        __A : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        __A : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
        __A : Optional[Any] = CLIPTextModel(_lowercase )
        __A : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_lowercase )
        __A : List[Any] = CLIPTextModelWithProjection(_lowercase )
        __A : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=_lowercase )
        __A : Optional[int] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    # NOTE(review): duplicate mangled parameter names below (a SyntaxError as
    # written) -- upstream this is ``get_dummy_inputs(self, device, seed=0)``.
    def _a ( self , __lowerCamelCase , __lowerCamelCase=0 ):
        '''Build a small seeded input dict (prompt, init image, generator).'''
        __A : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
        __A : List[Any] = image / 2 + 0.5
        if str(_lowercase ).startswith('mps' ):
            __A : Tuple = torch.manual_seed(_lowercase )
        else:
            __A : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __A : List[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def _a ( self ):
        '''End-to-end tiny pipeline run; compare an image slice to references.'''
        __A : str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __A : int = self.get_dummy_components()
        __A : Optional[int] = StableDiffusionXLImgaImgPipeline(**_lowercase )
        __A : Any = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        __A : Optional[Any] = self.get_dummy_inputs(_lowercase )
        __A : Optional[int] = sd_pipe(**_lowercase ).images
        __A : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __A : str = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def _a ( self ):
        '''Common attention-slicing equivalence test with a loose tolerance.'''
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def _a ( self ):
        '''Common batch-vs-single equivalence test with a loose tolerance.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def _a ( self ):
        '''Intentionally skipped common test.'''
        pass

    def _a ( self ):
        '''Check that prompt strings and precomputed prompt embeddings give
        the same result.'''
        __A : Tuple = self.get_dummy_components()
        __A : List[Any] = StableDiffusionXLImgaImgPipeline(**_lowercase )
        __A : Union[str, Any] = sd_pipe.to(_lowercase )
        __A : List[Any] = sd_pipe.to(_lowercase )
        sd_pipe.set_progress_bar_config(disable=_lowercase )
        # forward without prompt embeds
        __A : Dict = self.get_dummy_inputs(_lowercase )
        __A : str = 3 * ["""this is a negative prompt"""]
        __A : Union[str, Any] = negative_prompt
        __A : str = 3 * [inputs["""prompt"""]]
        __A : Union[str, Any] = sd_pipe(**_lowercase )
        __A : Any = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        __A : Optional[int] = self.get_dummy_inputs(_lowercase )
        __A : List[str] = 3 * ["""this is a negative prompt"""]
        __A : Dict = 3 * [inputs.pop('prompt' )]
        (
            __A
        ) : str = sd_pipe.encode_prompt(_lowercase , negative_prompt=_lowercase )
        __A : List[str] = sd_pipe(
            **_lowercase , prompt_embeds=_lowercase , negative_prompt_embeds=_lowercase , pooled_prompt_embeds=_lowercase , negative_pooled_prompt_embeds=_lowercase , )
        __A : Any = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
# NOTE(review): mangled like the class above -- method bodies read undefined
# names (``pipe``, ``_lowercase``, ``latents``, ...) while locals are bound to
# ``__A``; compare with the upstream diffusers slow test before running.
class __snake_case( unittest.TestCase ):
    '''Slow GPU integration tests for the full pretrained pipeline.'''

    def _a ( self ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # NOTE(review): duplicate mangled parameter names (a SyntaxError as
    # written) -- upstream: ``get_inputs(self, device, generator_device="cpu",
    # dtype=torch.float32, seed=0)``.
    def _a ( self , __lowerCamelCase , __lowerCamelCase="cpu" , __lowerCamelCase=torch.floataa , __lowerCamelCase=0 ):
        '''Build seeded latents + prompt inputs for a reproducible run.'''
        __A : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __A : Any = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
        __A : List[Any] = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
        __A : str = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def _a ( self ):
        '''Run the pretrained pipeline and compare an image slice to references.'''
        __A : List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __A : List[Any] = self.get_inputs(_lowercase )
        __A : Optional[Any] = pipe(**_lowercase ).images
        __A : List[str] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        __A : Tuple = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 719 | """simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging

# Module logger. Previously the archive map below re-bound the same mangled
# name and silently clobbered the logger; give the two objects distinct names.
logger = logging.get_logger(__name__)

# Map from checkpoint identifier to the URL of its config on the Hub.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class __snake_case( PretrainedConfig ):
    """Configuration for the Visual Attention Network (VAN) backbone.

    Defaults reproduce the ``Visual-Attention-Network/van-base`` architecture.
    Extra keyword arguments are forwarded to ``PretrainedConfig``.

    Fixes over the previous version: the base class was an undefined name
    (``A_``) instead of the imported ``PretrainedConfig``; ``__init__`` used one
    duplicated parameter name for every argument (a SyntaxError) and bound the
    values to a local instead of ``self``.
    """

    # Key used by AutoConfig to dispatch to this configuration class.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=None,
        strides=None,
        hidden_sizes=None,
        depths=None,
        mlp_ratios=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        # Per-stage hyper-parameters; ``None`` means the van-base defaults
        # (avoids mutable default arguments while keeping the same behavior).
        self.patch_sizes = [7, 3, 3, 3] if patch_sizes is None else patch_sizes
        self.strides = [4, 2, 2, 2] if strides is None else strides
        self.hidden_sizes = [64, 128, 320, 512] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 12, 3] if depths is None else depths
        self.mlp_ratios = [8, 8, 4, 4] if mlp_ratios is None else mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 237 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCamelCase_ ( datasets.BuilderConfig ):
    """BuilderConfig for the pandas/pickle dataset loader.

    features: optional explicit schema. The builder below reads it as
    ``self.config.features``, so the field must be named ``features`` (the
    previous mangled name ``_lowercase`` was never read by anyone).
    """

    features: Optional[datasets.Features] = None
class lowerCamelCase_ ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that yields tables from pickled pandas DataFrames.

    Fixes over the previous version: the four methods all shared one mangled
    name (so only the last survived and the base class could not dispatch to
    them), ``self._cast_table`` was called but never defined, and several
    locals (``data_files``, ``files``, ``splits``, ``pa_table``) were read
    without ever being bound. Restored to the canonical ``datasets`` layout.
    """

    # ``datasets`` uses this attribute to build the per-split config objects.
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Expose the (optional) user-provided schema."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict ``data_files`` and build one generator per split."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a table to the requested schema, if one was configured."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield one Arrow table per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 17 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNet + ConvNext config for the given checkpoint name.

    The caller below refers to this function as ``get_upernet_config``; the
    previous mangled definition (``_lowerCAmelCase``) left that call unresolved,
    and the id2label comprehension cast the wrong variable. Restored.
    """
    auxiliary_in_channels = 384
    # Per-size ConvNext backbone hyper-parameters.
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k: 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def _lowerCAmelCase ( UpperCamelCase__: int ) -> Dict:
"""simple docstring"""
A = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place, dropping the old key.

    The call site below uses ``rename_key``; the previous mangled definition
    repeated one parameter name three times (a SyntaxError) and never stored
    the popped value back. Restored.
    """
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg UperNet-ConvNext checkpoint, convert it to HF format,
    verify its logits on a reference image, and optionally save/push it.

    The ``__main__`` block below calls ``convert_upernet_checkpoint``; the
    previous mangled definition repeated one parameter name (a SyntaxError)
    and read unbound locals throughout. Restored.
    """
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)

    # Reference logits for the top-left 3x3 patch of class 0, per model size.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion. The previous
    # version bound the parser and parsed args to mangled names while reading
    # ``parser``/``args``, so nothing was ever defined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641 | 0 |
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> bool:
"""simple docstring"""
if num < 0:
return False
A = num
A = 0
while num > 0:
A = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# NOTE(review): both constants bind the same (mangled) name, so the second
# assignment clobbers the first. Upstream these appear to be two distinct
# batch-size constants (e.g. train vs. eval) — confirm against the original
# accelerate example before relying on either value.
UpperCamelCase : int = 16
UpperCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Tokenize GLUE/MRPC and build train/eval dataloaders.

    ``training_function`` below calls ``get_dataloaders``; the previous mangled
    definition repeated one parameter name (a SyntaxError) and read unbound
    locals (``tokenizer``, ``datasets``, ``tokenized_datasets``). Restored.

    accelerator: used to decide TPU-style fixed-length padding in collate_fn.
    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate bert-base-cased on GLUE/MRPC under accelerate,
    optionally driven by a DeepSpeed plugin config.

    config: dict with 'lr', 'num_epochs', 'seed', 'batch_size'.
    args: CLI namespace from ``main`` (model path, output dir, optional
    performance lower bound asserted after training).

    The previous mangled version bound every local to one name while reading
    the descriptive names, and was unrunnable; restored from the visible
    data flow.
    """
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer: DeepSpeed may own the optimizer, in which case we
    # hand it a dummy that it replaces from its own config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler (same dummy trick when DeepSpeed provides one)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels'])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # Drop the duplicated tail samples that gather() padded in.
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        # NOTE(review): the mangled source only shows a bare assignment here;
        # upstream records per-epoch accuracy into the dict dumped below.
        performance_metric[f"epoch-{epoch}"] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    """Parse CLI arguments and launch ``training_function``.

    The ``__main__`` guard below calls ``main``; the previous mangled
    definition used an undefined name for every argparse ``type=``/``default=``
    and never bound ``parser``/``args``/``config``. Restored.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--performance_lower_bound',
        type=float,
        default=None,
        help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=3,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2e-05, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Script entry point.
# NOTE(review): ``main`` is expected to be the argument-parsing function
# defined above (currently name-mangled to ``__snake_case``) — confirm.
if __name__ == "__main__":
    main()
| 91 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A : List[Any] = logging.get_logger(__name__)
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Sequence feature extractor: pads/truncates batches of extracted features.

    Mirrors ``transformers.SequenceFeatureExtractor``: ``__init__`` records
    feature size, sampling rate and padding value; the long method after it is
    the ``pad`` entry point, followed by the ``_pad``, ``_truncate`` and
    ``_get_padding_strategies`` helpers.

    NOTE(review): throughout this class, assignments bind the mangled local
    name ``lowerCAmelCase_`` while later statements read descriptive names
    (``processed_features``, ``required_input``, ``batch_outputs``, ...), and
    every parameter shares the single mangled name ``lowerCamelCase``
    (duplicate parameter names are a SyntaxError). Code is kept byte-for-byte;
    confirm against upstream ``feature_extraction_sequence_utils.py`` before
    relying on it running.
    """
    def __init__( self : Tuple , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : float , **lowerCamelCase : Any ) -> Any:
        # Presumed parameters: feature_size, sampling_rate, padding_value.
        lowerCAmelCase_ : str = feature_size
        lowerCAmelCase_ : Union[str, Any] = sampling_rate
        lowerCAmelCase_ : int = padding_value
        # Side to pad on ("right" default) and whether to emit attention masks.
        lowerCAmelCase_ : Tuple = kwargs.pop("""padding_side""" , """right""" )
        lowerCAmelCase_ : Dict = kwargs.pop("""return_attention_mask""" , lowerCamelCase )
        super().__init__(**lowerCamelCase )

    # Presumably the public ``pad`` entry point: normalizes the input layout,
    # casts tensors to numpy, truncates, then pads each example and re-batches.
    def __lowercase ( self : Any , lowerCamelCase : Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , lowerCamelCase : Union[bool, str, PaddingStrategy] = True , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            lowerCAmelCase_ : str = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
                F' to this method that includes {self.model_input_names[0]}, but you provided'
                F' {list(processed_features.keys() )}' )
        lowerCAmelCase_ : Any = processed_features[self.model_input_names[0]]
        lowerCAmelCase_ : Union[str, Any] = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        # Empty batch: nothing to pad; return (with an empty mask if requested).
        if len(lowerCamelCase ) == 0:
            if return_attention_mask:
                lowerCAmelCase_ : str = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        lowerCAmelCase_ : Any = required_input[0]
        if isinstance(lowerCamelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            lowerCAmelCase_ : Any = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(lowerCamelCase ):
                lowerCAmelCase_ : int = required_input[index][0]
        # Infer the tensor framework from the first element when not specified.
        if return_tensors is None:
            if is_tf_tensor(lowerCamelCase ):
                lowerCAmelCase_ : List[Any] = """tf"""
            elif is_torch_tensor(lowerCamelCase ):
                lowerCAmelCase_ : List[Any] = """pt"""
            elif isinstance(lowerCamelCase , (int, float, list, tuple, np.ndarray) ):
                lowerCAmelCase_ : Optional[Any] = """np"""
            else:
                raise ValueError(
                    F'type of {first_element} unknown: {type(lowerCamelCase )}. '
                    """Should be one of a python, numpy, pytorch or tensorflow object.""" )
        # Normalize every feature column to numpy for uniform processing.
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                lowerCAmelCase_ : Union[str, Any] = to_numpy(lowerCamelCase )
            else:
                lowerCAmelCase_ : Union[str, Any] = [to_numpy(lowerCamelCase ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        lowerCAmelCase_ : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase , max_length=lowerCamelCase )
        lowerCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]]
        lowerCAmelCase_ : List[str] = len(lowerCamelCase )
        if not all(len(lowerCamelCase ) == batch_size for v in processed_features.values() ):
            raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
        # First pass: truncate every example individually.
        lowerCAmelCase_ : Union[str, Any] = []
        for i in range(lowerCamelCase ):
            lowerCAmelCase_ : List[Any] = {k: v[i] for k, v in processed_features.items()}
            # truncation
            lowerCAmelCase_ : Optional[Any] = self._truncate(
                lowerCamelCase , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
            truncated_inputs.append(lowerCamelCase )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            lowerCAmelCase_ : Dict = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            lowerCAmelCase_ : List[Any] = PaddingStrategy.MAX_LENGTH
        # Second pass: pad each truncated example and regroup columns.
        lowerCAmelCase_ : Union[str, Any] = {}
        for i in range(lowerCamelCase ):
            # padding
            lowerCAmelCase_ : Tuple = self._pad(
                truncated_inputs[i] , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    lowerCAmelCase_ : List[Any] = []
                # Keep float64 out of the batch: downcast to float32.
                if value.dtype is np.dtype(np.floataa ):
                    lowerCAmelCase_ : Union[str, Any] = value.astype(np.floataa )
                batch_outputs[key].append(lowerCamelCase )
        return BatchFeature(lowerCamelCase , tensor_type=lowerCamelCase )

    # Presumably ``_pad``: pad a single example (and its attention mask) to
    # ``max_length`` on ``self.padding_side``.
    def __lowercase ( self : Tuple , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ) -> dict:
        lowerCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            lowerCAmelCase_ : Dict = len(lowerCamelCase )
        # Round max_length up to the next multiple when requested.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            lowerCAmelCase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        lowerCAmelCase_ : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            lowerCAmelCase_ : Optional[Any] = np.ones(len(lowerCamelCase ) , dtype=np.intaa )
        if needs_to_be_padded:
            lowerCAmelCase_ : List[str] = max_length - len(lowerCamelCase )
            if self.padding_side == "right":
                if return_attention_mask:
                    lowerCAmelCase_ : Any = np.pad(
                        processed_features["""attention_mask"""] , (0, difference) )
                # 2-D features pad only along the time axis; 1-D pad directly.
                lowerCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                lowerCAmelCase_ : Optional[int] = np.pad(
                    lowerCamelCase , lowerCamelCase , """constant""" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    lowerCAmelCase_ : Tuple = np.pad(
                        processed_features["""attention_mask"""] , (difference, 0) )
                lowerCAmelCase_ : str = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                lowerCAmelCase_ : List[Any] = np.pad(
                    lowerCamelCase , lowerCamelCase , """constant""" , constant_values=self.padding_value )
            else:
                raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return processed_features

    # Presumably ``_truncate``: clip a single example (and its attention mask)
    # down to ``max_length`` when truncation is enabled.
    def __lowercase ( self : str , lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ) -> Union[str, Any]:
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
        lowerCAmelCase_ : List[str] = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            lowerCAmelCase_ : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        lowerCAmelCase_ : Union[str, Any] = len(lowerCamelCase ) > max_length
        if needs_to_be_truncated:
            lowerCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                lowerCAmelCase_ : Union[str, Any] = processed_features["""attention_mask"""][:max_length]
        return processed_features

    # Presumably ``_get_padding_strategies``: normalize the user-facing
    # ``padding`` argument into a PaddingStrategy and validate it.
    def __lowercase ( self : Dict , lowerCamelCase : List[str]=False , lowerCamelCase : List[str]=None ) -> List[str]:
        # Get padding strategy
        if padding is not False:
            if padding is True:
                lowerCAmelCase_ : Dict = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(lowerCamelCase , lowerCamelCase ):
                lowerCAmelCase_ : str = PaddingStrategy(lowerCamelCase )
            elif isinstance(lowerCamelCase , lowerCamelCase ):
                lowerCAmelCase_ : List[str] = padding
        else:
            lowerCAmelCase_ : Any = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
                """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
        return padding_strategy
| 275 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__A : str = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune from.

    ``main`` below passes ``ModelArguments`` to ``HfArgumentParser``; the
    previous mangled class name and the single repeated field name made the
    class unusable (every field after the first shadowed its predecessor).
    """

    # Required: path or Hub identifier of the base model.
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether tp freeze the encoder.'})
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for seq2seq training and eval.

    ``main`` below passes ``DataTrainingArguments`` to ``HfArgumentParser``;
    the previous mangled class/field names (one repeated field name, undefined
    default sentinels) made the class unusable. Field names and defaults
    restored from the help strings and the upstream finetune script.
    """

    # Required: directory containing the task's data files.
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
    task: Optional[str] = field(
        default='summarization',
        metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'})
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'})
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'},
    )
def UpperCamelCase_ ( split , metrics , output_dir ):
    """Log and persist metrics for one data split.

    Logs every key/value pair in ``metrics``, then writes the dict to
    ``<output_dir>/<split>_results.json`` via the module-level ``save_json`` helper.

    Fix: the parameter list previously declared ``A__`` three times (a SyntaxError in
    Python); the body already read ``split`` and ``metrics``, so the distinct names
    are restored.
    """
    logger.info(f'***** {split} metrics *****' )
    for key in sorted(metrics.keys() ):
        logger.info(f' {key} = {metrics[key]}' )
    save_json(metrics , os.path.join(output_dir , f'{split}_results.json' ) )
def UpperCamelCase_ ( ):
    '''Seq2seq fine-tuning entry point: parse args, set up logging/seed, load
    config/tokenizer/model, build datasets and trainer, then run the requested
    train/eval/predict phases and return the merged metrics dict.

    NOTE(review): throughout this function results are bound to the placeholder
    name ``lowerCAmelCase_`` while later lines read ``parser``, ``model_args``,
    ``data_args``, ``training_args``, ``config``, ``tokenizer``, ``model``,
    ``dataset_class``, ``train_dataset``/``eval_dataset``/``test_dataset``,
    ``compute_metrics_fn``, ``trainer``, ``all_metrics``, ``metrics``,
    ``train_result``, ``test_output`` and ``test_preds`` — those names are never
    assigned as written. Compare against the original script before running.
    '''
    lowerCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = parser.parse_args_into_dataclasses()
    check_output_dir(A__ )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""" , A__ )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # Forward selected dropout/layerdrop CLI overrides onto the model config.
    lowerCAmelCase_ : List[Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(A__ , A__ , A__ ):
            assert hasattr(A__ , A__ ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(A__ , A__ , getattr(A__ , A__ ) )
    lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    lowerCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(A__ , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        lowerCAmelCase_ : List[Any] = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(A__ , A__ ):
            lowerCAmelCase_ : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            lowerCAmelCase_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(A__ )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    lowerCAmelCase_ : Any = SeqaSeqDataset
    # Get datasets
    lowerCAmelCase_ : List[str] = (
        dataset_class(
            A__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    lowerCAmelCase_ : List[Any] = (
        dataset_class(
            A__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    lowerCAmelCase_ : List[Any] = (
        dataset_class(
            A__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    lowerCAmelCase_ : Dict = (
        build_compute_metrics_fn(data_args.task , A__ ) if training_args.predict_with_generate else None
    )
    lowerCAmelCase_ : str = SeqaSeqTrainer(
        model=A__ , args=A__ , data_args=A__ , train_dataset=A__ , eval_dataset=A__ , data_collator=SeqaSeqDataCollator(
        A__ , A__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A__ , tokenizer=A__ , )
    lowerCAmelCase_ : Dict = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )
        lowerCAmelCase_ : Optional[int] = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        lowerCAmelCase_ : Union[str, Any] = train_result.metrics
        lowerCAmelCase_ : Optional[int] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , A__ , training_args.output_dir )
            all_metrics.update(A__ )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        lowerCAmelCase_ : Any = trainer.evaluate(metric_key_prefix="""val""" )
        lowerCAmelCase_ : Optional[int] = data_args.n_val
        lowerCAmelCase_ : Any = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , A__ , training_args.output_dir )
            all_metrics.update(A__ )
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        lowerCAmelCase_ : Union[str, Any] = trainer.predict(test_dataset=A__ , metric_key_prefix="""test""" )
        lowerCAmelCase_ : Optional[int] = test_output.metrics
        lowerCAmelCase_ : List[Any] = data_args.n_test
        if trainer.is_world_process_zero():
            lowerCAmelCase_ : int = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , A__ , training_args.output_dir )
            all_metrics.update(A__ )
            if training_args.predict_with_generate:
                lowerCAmelCase_ : int = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
                lowerCAmelCase_ : List[Any] = lmap(str.strip , A__ )
                write_txt_file(A__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
    if trainer.is_world_process_zero():
        save_json(A__ , os.path.join(training_args.output_dir , """all_results.json""" ) )
    return all_metrics
def UpperCamelCase_ ( A__ : Optional[int] ):
    '''Spawn-style entry point (e.g. for TPU launchers): the worker-index
    argument is unused; simply runs :func:`main`.'''
    main()
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 275 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( ProcessorMixin ):
    """Joint text/image processor wrapping a CLIP image processor and an
    XLM-Roberta tokenizer.

    Fixes: the base class is the imported ``ProcessorMixin`` (it was an unrelated
    placeholder), the class attributes use the names ``ProcessorMixin`` reads
    (``attributes``/``image_processor_class``/``tokenizer_class``), ``__call__``
    now actually stores its tokenizer/image-processor results (previously the
    results were bound to throwaway names while ``encoding``/``image_features``
    were read undefined, and the ``pixel_values`` merge was lost), the duplicate
    parameter names (a SyntaxError) are restored, and the deprecation warning
    passes ``FutureWarning`` as its category again.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )

        super().__init__(image_processor , tokenizer )

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; when both are given the
        pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        # Union of the two sub-processors' input names, deduplicated, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 440 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase__ ( XLMRobertaConfig ):
    """Configuration for the multilingual-CLIP text encoder.

    Fixes: the base class is the imported ``XLMRobertaConfig`` (it was an
    unrelated placeholder), ``model_type`` is stored under the name the
    transformers config machinery reads, ``__init__`` no longer declares
    ``_lowercase`` twice (a SyntaxError), and the two dimensions are stored
    under ``transformerDimensions``/``numDims`` — the attributes the model
    class reads (``config.transformerDimensions`` / ``config.numDims``).
    """

    model_type = """M-CLIP"""

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        # Width of the transformer hidden states / of the projected image space.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class lowercase__ ( PreTrainedModel ):
    """Multilingual-CLIP text model: an XLM-Roberta encoder followed by a linear
    projection into the CLIP embedding space.

    Fixes: the base class is the imported ``PreTrainedModel`` (it was an
    unrelated placeholder), the sub-modules are stored under the attribute
    names the forward pass reads (``self.transformer`` /
    ``self.LinearTransformation`` — previously both were bound to a throwaway
    local), the forward method is named ``forward`` so ``nn.Module.__call__``
    dispatches to it, and its duplicate parameter names (a SyntaxError) are
    restored.
    """

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward(self, input_ids, attention_mask):
        """Return (projected mean-pooled embedding, token embeddings)."""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # Mean-pool token embeddings over the non-padded positions.
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
| 440 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
    """Test helper that builds BlipText configs/inputs and checks TFBlipTextModel
    output shapes.

    Fixes: ``__init__`` no longer declares ``__A`` many times (a SyntaxError) —
    the parameter names the body reads are restored; the method names match
    their call sites (``prepare_config_and_inputs`` etc. — previously every
    method was named ``lowerCamelCase_``, each shadowing the last); and the
    lost tuple unpackings (``input_mask.shape``, the random start indices,
    ``config_and_inputs``) are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create (config, input_ids, attention_mask) for one test batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Reshape each row into "ones then zeros" so every sequence has a
            # random contiguous valid length of at least 1.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Build a BlipTextConfig from this tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Instantiate TFBlipTextModel and verify output shapes with and without a mask."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared TF model tests: return (config, inputs_dict)."""
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , unittest.TestCase ):
    """Test suite wiring for TFBlipTextModel.

    Fixes: the mixin base is the imported ``TFModelTesterMixin`` (it was a bare
    placeholder ``A``), class attributes and test methods use the names the test
    framework discovers (previously every attribute was ``lowerCAmelCase_`` and
    every method ``lowerCamelCase_``, each shadowing the last so only one ran),
    undefined ``a_`` placeholders are replaced by the values actually used, and
    the ``| 44 |`` extraction junk fused onto the last line is removed.
    NOTE(review): the flag names below (``test_onnx``/``test_pruning``/
    ``test_head_masking``) are reconstructed from the upstream test file —
    confirm against the original before relying on them.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        # NOTE(review): `BlipTextModelTester` is the name used by the original file;
        # in this file the tester class is defined under a placeholder name.
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( A ,A ) -> str:
lowercase : Optional[int] = old_name
if "patch_embed" in old_name:
lowercase , lowercase , lowercase : Tuple = old_name.split("." )
if layer == "0":
lowercase : int = old_name.replace("0" ,"convolution1" )
elif layer == "1":
lowercase : List[str] = old_name.replace("1" ,"batchnorm_before" )
elif layer == "3":
lowercase : Dict = old_name.replace("3" ,"convolution2" )
else:
lowercase : Union[str, Any] = old_name.replace("4" ,"batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" ,A ):
lowercase : List[str] = r"\b\d{2}\b"
if bool(re.search(A ,A ) ):
lowercase : str = re.search(r"\d\.\d\d." ,A ).group()
else:
lowercase : int = re.search(r"\d\.\d." ,A ).group()
if int(match[0] ) < 6:
lowercase : str = old_name.replace(A ,"" )
lowercase : List[str] = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] )
lowercase : Optional[Any] = "intermediate_stages." + trimmed_name
else:
lowercase : str = old_name.replace(A ,"" )
if int(match[2] ) < num_meta4D_last_stage:
lowercase : Optional[int] = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] )
else:
lowercase : List[Any] = str(int(match[2] ) - num_meta4D_last_stage )
lowercase : List[Any] = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
lowercase : str = trimmed_name.replace("norm1" ,"layernorm1" )
elif "norm2" in old_name:
lowercase : Optional[Any] = trimmed_name.replace("norm2" ,"layernorm2" )
elif "fc1" in old_name:
lowercase : Optional[int] = trimmed_name.replace("fc1" ,"linear_in" )
elif "fc2" in old_name:
lowercase : str = trimmed_name.replace("fc2" ,"linear_out" )
lowercase : Dict = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." ,A ):
lowercase : Union[str, Any] = old_name.replace("network" ,"intermediate_stages" )
if "fc" in new_name:
lowercase : Any = new_name.replace("fc" ,"convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowercase : Optional[Any] = new_name.replace("norm1" ,"batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowercase : List[str] = new_name.replace("norm2" ,"batchnorm_after" )
if "proj" in new_name:
lowercase : Optional[int] = new_name.replace("proj" ,"projection" )
if "dist_head" in new_name:
lowercase : Tuple = new_name.replace("dist_head" ,"distillation_classifier" )
elif "head" in new_name:
lowercase : Tuple = new_name.replace("head" ,"classifier" )
elif "patch_embed" in new_name:
lowercase : Optional[int] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowercase : str = new_name.replace("norm" ,"layernorm" )
lowercase : List[Any] = "efficientformer." + new_name
else:
lowercase : Optional[Any] = "efficientformer.encoder." + new_name
return new_name
def _A ( A ,A ) -> Optional[Any]:
    # Intended to rebuild the checkpoint dict in place: pop every key and re-insert
    # it under the renamed key.
    # NOTE(review): as written this is broken — the parameter list declares `A` twice
    # (a SyntaxError in Python), `checkpoint` is never bound, the popped value is
    # assigned to a throwaway name while `val` is read undefined, and the call to the
    # key-renaming helper (which this file also defines under the colliding name `_A`)
    # was lost entirely. Compare with the original conversion script before running.
    for key in checkpoint.copy().keys():
        lowercase : List[str] = checkpoint.pop(A )
        lowercase : int = val
    return checkpoint
def _A ( ):
    """Download and return the standard COCO val2017 test image used by the
    conversion sanity check.

    Fix: ``requests.get`` was called with ``stream=A`` where ``A`` is undefined
    in this zero-argument function; streaming must be enabled (``stream=True``)
    for ``response.raw`` to be readable by ``Image.open``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def _A ( A ,A ,A ,A ) -> List[Any]:
    # Convert an original EfficientFormer checkpoint to the transformers format,
    # sanity-check the logits for the l1/l3/l7 variants, save model + processor,
    # and optionally push both to the Hub.
    # NOTE(review): as written this is broken — the parameter list declares `A`
    # four times (a SyntaxError in Python), and every result below is bound to the
    # throwaway name `lowercase` while later lines read `checkpoint_path`, `config`,
    # `model`, `model_name`, `image_size`, `crop_size`, `processor`, `original_pixel_values`,
    # `logits`, `expected_shape`, `expected_logits` and `pytorch_dump_path`, none of
    # which are assigned. Compare against the original conversion script before running.
    lowercase : Optional[int] = torch.load(A ,map_location="cpu" )["model"]
    lowercase : int = EfficientFormerConfig.from_json_file(A )
    lowercase : Tuple = EfficientFormerForImageClassificationWithTeacher(A )
    lowercase : int = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    lowercase : Optional[int] = config.depths[-1] - config.num_metaad_blocks + 1
    lowercase : int = convert_torch_checkpoint(A ,A )
    model.load_state_dict(A )
    model.eval()
    lowercase : List[Any] = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    lowercase : Tuple = prepare_img()
    lowercase : Optional[int] = 2_5_6
    lowercase : str = 2_2_4
    lowercase : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,)
    lowercase : Union[str, Any] = processor(images=A ,return_tensors="pt" ).pixel_values
    # original processing pipeline
    lowercase : Tuple = Compose(
        [
            Resize(A ,interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(A ),
            ToTensor(),
            Normalize(A ,A ),
        ] )
    lowercase : List[Any] = image_transforms(A ).unsqueeze(0 )
    # The HF image processor and the original torchvision pipeline must agree.
    assert torch.allclose(A ,A )
    lowercase : Union[str, Any] = model(A )
    lowercase : Any = outputs.logits
    lowercase : List[str] = (1, 1_0_0_0)
    # Per-variant expected logits for the sanity check.
    if "l1" in model_name:
        lowercase : Any = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :1_0] ,A ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        lowercase : List[Any] = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :1_0] ,A ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        lowercase : Optional[int] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(A ).mkdir(exist_ok=A )
    model.save_pretrained(A )
    print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(A )
    print(F'''Processor successfuly saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add model" ,use_temp_dir=A ,)
        processor.push_to_hub(
            repo_id=F'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add image processor" ,use_temp_dir=A ,)
if __name__ == "__main__":
    # CLI for the EfficientFormer checkpoint conversion.
    # NOTE(review): the ArgumentParser is bound to `lowerCAmelCase` while the
    # `.add_argument` calls read `parser`, and the final call targets
    # `convert_efficientformer_checkpoint`, which is not defined under that name
    # in this file — verify against the original script before running.
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing to the hub is opt-out: --no-push_to_hub disables it.
    parser.set_defaults(push_to_hub=True)
    lowerCAmelCase : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 372 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__ :
    """Test helper that builds Flaubert configs/inputs and checks the TF Flaubert
    model heads' output shapes.

    NOTE(review): as written this class is broken by an automated rename — every
    method is defined as ``SCREAMING_SNAKE_CASE__`` (each shadowing the previous),
    several signatures declare ``SCREAMING_SNAKE_CASE_`` multiple times (a
    SyntaxError in Python), ``__init__`` reads the undefined name ``parent``, and
    results are bound to ``lowerCAmelCase_`` while later lines read the original
    local names. Compare against the original test file before running.
    """

    # Constructor: store the parent test case and hard-coded hyper-parameters.
    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase_ : int = parent
        lowerCAmelCase_ : str = 1_3
        lowerCAmelCase_ : List[str] = 7
        lowerCAmelCase_ : Optional[int] = True
        lowerCAmelCase_ : List[Any] = True
        lowerCAmelCase_ : List[str] = True
        lowerCAmelCase_ : Optional[int] = True
        lowerCAmelCase_ : Optional[int] = True
        lowerCAmelCase_ : Optional[int] = False
        lowerCAmelCase_ : str = False
        lowerCAmelCase_ : Optional[Any] = False
        lowerCAmelCase_ : Tuple = 2
        lowerCAmelCase_ : List[Any] = 9_9
        lowerCAmelCase_ : str = 0
        lowerCAmelCase_ : str = 3_2
        lowerCAmelCase_ : Optional[int] = 2
        lowerCAmelCase_ : Dict = 4
        lowerCAmelCase_ : List[Any] = 0.1
        lowerCAmelCase_ : Optional[Any] = 0.1
        lowerCAmelCase_ : Any = 5_1_2
        lowerCAmelCase_ : Union[str, Any] = 1_6
        lowerCAmelCase_ : Optional[Any] = 2
        lowerCAmelCase_ : Optional[Any] = 0.02
        lowerCAmelCase_ : List[Any] = 3
        lowerCAmelCase_ : str = 4
        lowerCAmelCase_ : Tuple = 'last'
        lowerCAmelCase_ : Union[str, Any] = True
        lowerCAmelCase_ : Any = None
        lowerCAmelCase_ : int = 0

    # prepare_config_and_inputs: build ids/masks/labels and a FlaubertConfig.
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        lowerCAmelCase_ : Union[str, Any] = None
        if self.use_input_lengths:
            lowerCAmelCase_ : Union[str, Any] = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        lowerCAmelCase_ : Optional[int] = None
        if self.use_token_type_ids:
            lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCAmelCase_ : str = None
        lowerCAmelCase_ : Optional[Any] = None
        lowerCAmelCase_ : Dict = None
        if self.use_labels:
            lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase_ : Any = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # create_and_check for the base TFFlaubertModel (dict and list inputs).
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , ):
        lowerCAmelCase_ : int = TFFlaubertModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Tuple = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Dict = [input_ids, input_mask]
        lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # create_and_check for TFFlaubertWithLMHeadModel.
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , ):
        lowerCAmelCase_ : Optional[int] = TFFlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Union[str, Any] = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # create_and_check for TFFlaubertForQuestionAnsweringSimple.
    def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , ):
        lowerCAmelCase_ : List[str] = TFFlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : int = {'input_ids': input_ids, 'lengths': input_lengths}
        lowerCAmelCase_ : Any = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # create_and_check for TFFlaubertForSequenceClassification.
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , ):
        lowerCAmelCase_ : List[str] = TFFlaubertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : List[Any] = {'input_ids': input_ids, 'lengths': input_lengths}
        lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # create_and_check for TFFlaubertForTokenClassification.
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        lowerCAmelCase_ : int = self.num_labels
        lowerCAmelCase_ : Union[str, Any] = TFFlaubertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # create_and_check for TFFlaubertForMultipleChoice: inputs are tiled per choice.
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        lowerCAmelCase_ : Any = self.num_choices
        lowerCAmelCase_ : Optional[Any] = TFFlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Dict = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase_ : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase_ : str = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase_ : Tuple = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # prepare_config_and_inputs_for_common: adapter for the shared TF model tests.
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        (
            lowerCAmelCase_
        ) : Union[str, Any] = config_and_inputs
        lowerCAmelCase_ : Union[str, Any] = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class UpperCamelCase__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for the TF Flaubert family.

    NOTE(review): originally every class attribute was bound to one mangled
    name and every method to one mangled name, so only the last of each
    survived and unittest discovered no tests; real mixin attribute names and
    ``test_*`` method names are restored.  The two mixin base classes and
    ``FlaubertConfig`` are reconstructed (the original listed the same mangled
    base twice, which raises "duplicate base class") -- verify against the
    file's imports.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Return True for pipeline test combinations that are known to fail."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        # NOTE(review): config_class was mangled; FlaubertConfig is the natural
        # candidate for this model family -- confirm against the imports.
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Loading one pretrained checkpoint must succeed (network-dependent, @slow).
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__(unittest.TestCase):
    """Integration test: run the small cased TF Flaubert checkpoint on a fixed input."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        # NOTE(review): the original used `tf.intaa` / `tf.floataa`, which are not
        # TensorFlow attributes; int32 / float32 restored.
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 711 |
"""simple docstring"""
def UpperCamelCase_(a: int, b: int) -> int:
    """Multiply ``a`` by ``b`` with the double-and-add (Russian peasant) method.

    Runs in O(log b) additions.  ``b`` must be non-negative: for ``b <= 0`` the
    loop never executes and 0 is returned.  ``a`` may be any integer.

    NOTE(review): the original signature repeated one mangled parameter name
    twice (a SyntaxError) while the body used ``a``/``b``; names restored.
    """
    res = 0
    while b > 0:
        if b & 1:  # current low bit set -> this power-of-two multiple of a contributes
            res += a
        a += a  # double a for the next bit
        b >>= 1
    return res
def UpperCamelCase_(a: int, b: int, c: int) -> int:
    """Return ``(a * b) % c`` via modular double-and-add.

    The accumulator is reduced modulo ``c`` at every step, so intermediate
    values stay small even for large operands.  ``b`` must be non-negative
    (for ``b <= 0`` the loop never runs and 0 is returned).

    NOTE(review): the original signature repeated one mangled parameter name
    three times (a SyntaxError) while the body used ``a``/``b``/``c``; names
    restored.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 317 | 0 |
# First notebook cell injected into every auto-generated doc notebook:
# installs `transformers` + `datasets`; the commented line is the from-source variant.
SCREAMING_SNAKE_CASE__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# NOTE(review): all three constants here share one mangled name, so the earlier
# assignments are shadowed, and `INSTALL_CONTENT` is undefined in this chunk --
# presumably the string above was originally named INSTALL_CONTENT; verify.
SCREAMING_SNAKE_CASE__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder substitutions applied when rendering doc templates.
SCREAMING_SNAKE_CASE__ = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 9 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# Re-exported builtin: patch_submodule() tests need a builtin attribute living on this module.
__snake_case : Dict = open # noqa: we just need to have a builtin inside this module to test it properly
| 131 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a : Dict = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def lowercase_(wav, max_length, sample_rate=16000):
    """Return a random contiguous crop of *wav* at most ``max_length`` seconds long.

    Args:
        wav: audio samples (any sliceable sequence, e.g. a list or np.ndarray).
        max_length: maximum crop length in seconds.
        sample_rate: samples per second, used to convert seconds to samples.

    If the clip is already short enough it is returned unchanged; otherwise a
    uniformly random offset is drawn and a crop of exactly
    ``round(sample_rate * max_length)`` samples is returned.

    NOTE(review): the original signature repeated one mangled parameter name
    three times (a SyntaxError); names restored from the call site
    ``random_subsample(audio["array"], max_length=..., sample_rate=...)``.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name of a dataset from the datasets package"} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the training audio paths and labels."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "A file containing the validation audio paths and labels."} )
__UpperCAmelCase = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'"
} , )
__UpperCAmelCase = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to \'validation\'"
)
} , )
__UpperCAmelCase = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to \'audio\'"} , )
__UpperCAmelCase = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to \'label\'"} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCAmelCase = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def A ( self ) -> int:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , _a , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def lowercase_ ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , lowerCAmelCase__ , lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
__lowercase = DatasetDict()
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__lowercase = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase ):
__lowercase = []
for audio in batch[data_args.audio_column_name]:
__lowercase = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase__ )
__lowercase = feature_extractor(lowerCAmelCase__ , sampling_rate=feature_extractor.sampling_rate )
__lowercase = {model_input_name: inputs.get(lowerCAmelCase__ )}
__lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase ):
__lowercase = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
__lowercase = feature_extractor(lowerCAmelCase__ , sampling_rate=feature_extractor.sampling_rate )
__lowercase = {model_input_name: inputs.get(lowerCAmelCase__ )}
__lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowercase = raw_datasets['''train'''].features[data_args.label_column_name].names
__lowercase , __lowercase = {}, {}
for i, label in enumerate(lowerCAmelCase__ ):
__lowercase = str(lowerCAmelCase__ )
__lowercase = label
# Load the accuracy metric from the datasets package
__lowercase = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase ):
__lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCAmelCase__ , references=eval_pred.label_ids )
__lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase__ ) , labelaid=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__lowercase = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCAmelCase__ , output_all_columns=lowerCAmelCase__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__lowercase = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCAmelCase__ , output_all_columns=lowerCAmelCase__ )
# Initialize our trainer
__lowercase = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics('''eval''' , lowerCAmelCase__ )
trainer.save_metrics('''eval''' , lowerCAmelCase__ )
# Write model card and (optionally) push to hub
__lowercase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 721 |
import unittest
from knapsack import knapsack as k
class lowerCamelCase_(unittest.TestCase):
    """Unit tests for the project-local ``knapsack`` implementation.

    NOTE(review): the three test methods were all named ``A`` originally, so
    the first two were shadowed and none matched unittest's ``test_`` discovery
    prefix -- zero tests actually ran.  Distinct ``test_*`` names restore them.
    The argument order ``knapsack(cap, w, val, c)`` is reconstructed -- verify
    against the ``knapsack`` module's signature.
    """

    def test_base_case(self):
        """Capacity 0 (nothing fits) always yields optimal value 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: best pick is items of value 2 and 3 (weight 2 + 1 <= 3)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic capacity-50 instance; the optimum is 220 (values 100 + 120)."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 527 | 0 |
"""simple docstring"""
import copy
import re
class lowerCAmelCase__:
    """Compute short, reversible names for hyper-parameter trial runs.

    Each parameter key in ``DEFAULTS`` is abbreviated to its shortest
    collision-free prefix (per underscore-separated word); a trial's name is
    ``PREFIX`` plus one ``<short><value>`` token for every parameter that
    differs from its default.  ``parse_repr`` inverts ``shortname``.

    NOTE(review): in the original every class attribute shared one mangled name
    and every method shared another, so only the last of each survived while
    the bodies referenced the real names (``cls.PREFIX``,
    ``cls.build_naming_info`` ...); those names are restored here.  Internal
    static-method calls now go through this class's own name instead of the
    undefined ``TrialShortNamer``.
    """

    PREFIX = "hp"          # leading tag of every generated run name
    DEFAULTS = {}          # param_name -> default value; subclasses override
    NAMING_INFO = None     # lazily built abbreviation tables (see build_naming_info)

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Configure prefix and defaults, then (re)build the abbreviation tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and record) the shortest unused prefix abbreviating *word*."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: even the full word collides, so append "#"
            # plus a letter-encoded integer to disambiguate.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Abbreviate a full ``snake_case`` parameter name, recording the mapping."""
        words = param_name.split("_")
        shortname_parts = [lowerCAmelCase__.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register *param_name* in both forward and reverse abbreviation tables."""
        short_name = lowerCAmelCase__.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Populate NAMING_INFO once from the keys of DEFAULTS (idempotent)."""
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Encode *params* (a dict) as ``PREFIX`` plus non-default values."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            # Numbers are glued directly to the key; other values use a "-" separator.
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"""{key}{sep}{v}""")

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Decode a name produced by ``shortname`` back into a full params dict.

        Numeric values come back as floats; parameters absent from the name are
        filled in from ``DEFAULTS``.  (``repr`` shadows the builtin -- kept for
        signature compatibility.)
        """
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class lowerCAmelCase__(unittest.TestCase):
    """Slow integration tests for the Marian Tatoeba checkpoint converter.

    NOTE(review): the original decorator referenced an undefined mangled name;
    ``DEFAULT_REPO`` is restored from this file's import of
    ``convert_marian_tatoeba_to_pytorch``.  All three methods shared one
    mangled name (shadowing each other); distinct names restore them.
    """

    @cached_property
    def resolver(self):
        """Converter writing into a throwaway temp dir; built once per test case."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # Smoke test: converting one language pair must not raise.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run=True builds the card in memory without writing files.
        _, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__(PipelineTool):
    """Zero-shot text-classification tool built on BART-large-MNLI.

    ``encode`` pairs the input text with one NLI hypothesis per candidate
    label; ``decode`` picks the label whose hypothesis scores highest on the
    entailment logit.

    NOTE(review): the original class inherited from its own (not yet defined)
    mangled name; ``PipelineTool`` is restored from this file's
    ``from .base import PipelineTool``.  Attribute and method names
    (``setup``/``encode``/``decode``) are restored to the PipelineTool API the
    bodies clearly implement (``super().setup()``), and the mangled
    ``config.idalabel`` is restored to ``config.id2label``.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load model/tokenizer, then locate the 'entailment' class index in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs, one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose hypothesis has the highest entailment logit."""
        logits = outputs.logits
        # NOTE(review): hard-coded column 2 -- presumably the MNLI entailment
        # index; confirm it matches self.entailment_id for other checkpoints.
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 717 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __UpperCamelCase(image_size, device):
    """Download the BLIP demo image and return it as a normalized (1, 3, H, W) tensor.

    Args:
        image_size: target square side length for the resize.
        device: torch device the resulting tensor is moved to.

    NOTE(review): the original signature repeated one mangled parameter name
    (a SyntaxError); names restored from the call site
    ``load_demo_image(image_size=..., device='cpu')``.  ``stream=True`` is
    restored where the original passed a mangled undefined name.
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP-style channel mean/std normalization.
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def __UpperCamelCase(key):
    """Translate one original-BLIP state-dict key into the HF Transformers naming scheme.

    Substitutions are applied in order, each guarded by a substring test on the
    (possibly already rewritten) key, so later rules see earlier renames.

    NOTE(review): the original parameter was mangled while the body read
    ``key``; the name is restored so the function compiles.  The pattern
    "visual_encoder*" makes the final 'r' optional in the regex -- kept
    byte-identical to preserve the original matching behaviour.
    """
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def __UpperCamelCase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any]=None ):
if config_path is not None:
__a : int = BlipConfig.from_pretrained(lowerCAmelCase__ )
else:
__a : int = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__a : List[str] = BlipForConditionalGeneration(lowerCAmelCase__ ).eval()
__a : List[str] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
__a : Any = blip_decoder(pretrained=lowerCAmelCase__ , image_size=3_8_4 , vit='''base''' )
__a : Union[str, Any] = pt_model.eval()
__a : Tuple = pt_model.state_dict()
for key in modified_state_dict.copy():
__a : Tuple = modified_state_dict.pop(lowerCAmelCase__ )
__a : List[Any] = rename_key(lowerCAmelCase__ )
__a : Optional[Any] = value
hf_model.load_state_dict(lowerCAmelCase__ )
__a : Union[str, Any] = 3_8_4
__a : Tuple = load_demo_image(image_size=lowerCAmelCase__ , device='''cpu''' )
__a : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__a : Union[str, Any] = tokenizer(['''a picture of'''] ).input_ids
__a : List[str] = hf_model.generate(lowerCAmelCase__ , lowerCAmelCase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__a : Optional[Any] = hf_model.generate(lowerCAmelCase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCAmelCase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__a : Any = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
__a : Tuple = blip_vqa(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit='''base''' )
vqa_model.eval()
__a : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
__a : List[Any] = modified_state_dict.pop(lowerCAmelCase__ )
__a : Dict = rename_key(lowerCAmelCase__ )
__a : Dict = value
__a : List[str] = BlipForQuestionAnswering(lowerCAmelCase__ )
hf_vqa_model.load_state_dict(lowerCAmelCase__ )
__a : Union[str, Any] = ['''How many dogs are in this image?''']
__a : Tuple = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' ).input_ids
__a : Union[str, Any] = hf_vqa_model.generate(lowerCAmelCase__ , lowerCAmelCase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
__a : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
__a : Dict = blip_itm(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit='''base''' )
itm_model.eval()
__a : Any = itm_model.state_dict()
for key in modified_state_dict.copy():
__a : Dict = modified_state_dict.pop(lowerCAmelCase__ )
__a : int = rename_key(lowerCAmelCase__ )
__a : Optional[int] = value
__a : Any = BlipForImageTextRetrieval(lowerCAmelCase__ )
__a : List[Any] = ['''A picture of a woman with a dog sitting in a beach''']
__a : Optional[int] = tokenizer(
lowerCAmelCase__ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCAmelCase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowerCAmelCase__ )
hf_itm_model.eval()
__a : int = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ )
__a : Dict = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowercase__ =parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 326 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger (transformers-style logging wrapper).
# NOTE: the original bound all four constants to the same mangled name
# `__lowerCamelCase`, while the tokenizer class below references `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — restoring those names fixes the
# resulting NameErrors.
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Checkpoint name -> URL of its SentencePiece vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Checkpoint name -> maximum model input length (positional embedding size).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 20_48,
    "AI-Sweden/gpt-sw3-350m": 20_48,
    "AI-Sweden/gpt-sw3-1.6b": 20_48,
    "AI-Sweden/gpt-sw3-6.7b": 20_48,
    "AI-Sweden/gpt-sw3-20b": 20_48,
}
class _snake_case ( A__ ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family.

    Wraps a ``spiece.model`` file via ``sentencepiece`` and exposes the
    standard ``PreTrainedTokenizer`` interface, plus fast whole-text
    encode/decode helpers and a chat-prompt builder.

    NOTE(review): identifiers in this file were machine-mangled — every
    ``__init__`` parameter is named ``snake_case`` (a duplicate-argument
    SyntaxError) and several bodies reference original names that no longer
    exist (``sp_model_kwargs``, ``name_or_path``, ``state``, ``d``, ...).
    The comments below describe the apparent intended behavior; verify
    against the upstream GPT-SW3 tokenizer before relying on them.
    """

    UpperCamelCase__ =VOCAB_FILES_NAMES
    UpperCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ =["""input_ids""", """attention_mask"""]

    def __init__( self : Optional[Any] , snake_case : Optional[int] , snake_case : Optional[int]=False , snake_case : Dict=False , snake_case : int=False , snake_case : Dict=None , snake_case : Optional[Any]=None , snake_case : Any=None , snake_case : Any=None , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Tuple , ):
        # Default the SentencePiece kwargs to an empty dict when not provided.
        UpperCAmelCase_ :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        # `name_or_path` drives the choice of special-token defaults below.
        UpperCAmelCase_ :Any = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            UpperCAmelCase_ :Tuple = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        UpperCAmelCase_ :Optional[Any] = '''<|endoftext|>''' if eos_token is None else eos_token
        UpperCAmelCase_ :List[Any] = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk/eos as pad/bos.
            UpperCAmelCase_ :List[str] = unk_token if pad_token is None else pad_token
            UpperCAmelCase_ :Tuple = eos_token if bos_token is None else bos_token
        else:
            # All other checkpoints have dedicated <pad> and <s> tokens.
            UpperCAmelCase_ :Optional[int] = '''<pad>''' if pad_token is None else pad_token
            UpperCAmelCase_ :str = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
        UpperCAmelCase_ :str = do_lower_case
        UpperCAmelCase_ :Optional[Any] = remove_space
        UpperCAmelCase_ :Any = keep_accents
        UpperCAmelCase_ :List[Any] = vocab_file
        # Load the SentencePiece model from the given vocab file.
        UpperCAmelCase_ :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(snake_case )
        # Used for whitespace normalization in input texts
        # fmt : off
        UpperCAmelCase_ :Optional[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        UpperCAmelCase_ :int = re.compile(
            f'[{"".join(map(snake_case , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )

    def __getstate__( self : str ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        UpperCAmelCase_ :str = self.__dict__.copy()
        UpperCAmelCase_ :Any = None
        return state

    def __setstate__( self : Tuple , snake_case : List[Any] ):
        """Restore state and re-load the SentencePiece model after unpickling."""
        UpperCAmelCase_ :Tuple = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            UpperCAmelCase_ :Union[str, Any] = {}
        UpperCAmelCase_ :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def snake_case_ ( self : str ):
        # Vocabulary size equals the number of pieces in the SentencePiece model.
        return len(self.sp_model )

    def snake_case_ ( self : int , snake_case : str ):
        """Normalize raw text: strip non-printing chars, unify whitespace, NFC-normalize."""
        UpperCAmelCase_ :Union[str, Any] = self.non_printing_characters_re.sub('''''' , snake_case )
        # Normalize whitespaces
        UpperCAmelCase_ :Optional[int] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        UpperCAmelCase_ :List[Any] = unicodedata.normalize('''NFC''' , snake_case )
        return text

    def snake_case_ ( self : int , snake_case : str , **snake_case : Optional[int] ):
        """Tokenize: preprocess the text, then encode it with SentencePiece."""
        UpperCAmelCase_ :str = self.preprocess_text(snake_case )
        return self.sp_model.encode(snake_case , out_type=snake_case )

    def snake_case_ ( self : Dict , snake_case : str ):
        # Token string -> vocabulary id.
        return self.sp_model.PieceToId(snake_case )

    def snake_case_ ( self : int , snake_case : int ):
        # Vocabulary id -> token string.
        return self.sp_model.IdToPiece(snake_case )

    @staticmethod
    def snake_case_ ( snake_case : str ):
        # Identity pass-through; presumably `convert_tokens_to_string` helper
        # (body references `out_string`, lost to mangling — verify upstream).
        return out_string

    def snake_case_ ( self : Any , snake_case : List[str] ):
        """Join a token sequence back into a string, handling special tokens
        outside the SentencePiece decode path."""
        UpperCAmelCase_ :Optional[int] = []
        UpperCAmelCase_ :List[str] = ''''''
        UpperCAmelCase_ :Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(snake_case ) + token
                UpperCAmelCase_ :Union[str, Any] = True
                UpperCAmelCase_ :Optional[Any] = []
            else:
                current_sub_tokens.append(snake_case )
                UpperCAmelCase_ :str = False
        out_string += self.sp_model.decode(snake_case )
        return out_string

    def snake_case_ ( self : Any ):
        """Return the full vocabulary (plus added tokens) as a token -> id dict."""
        UpperCAmelCase_ :Optional[Any] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def snake_case_ ( self : int , snake_case : str , snake_case : Optional[str] = None ):
        """Save the SentencePiece model into `save_directory`; returns the path tuple."""
        if not os.path.isdir(snake_case ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCAmelCase_ :str = os.path.join(
            snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Copy the original file when it exists elsewhere; otherwise serialize
        # the in-memory model proto.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , snake_case )
        elif not os.path.isfile(self.vocab_file ):
            with open(snake_case , '''wb''' ) as fi:
                UpperCAmelCase_ :Tuple = self.sp_model.serialized_model_proto()
                fi.write(snake_case )
        return (out_vocab_file,)

    def snake_case_ ( self : Dict , snake_case : Union[str, List[str]] , snake_case : Union[str, bool] = False ):
        """Fast whole-text encode: preprocess + SentencePiece encode, optionally
        returning a torch tensor when return_tensors is True or "pt"."""
        if isinstance(snake_case , snake_case ):
            UpperCAmelCase_ :Optional[Any] = self.preprocess_text(snake_case )
            UpperCAmelCase_ :Any = self.sp_model.encode(snake_case )
        else:
            UpperCAmelCase_ :List[Any] = [self.preprocess_text(snake_case ) for t in text]
            UpperCAmelCase_ :Tuple = self.sp_model.encode(snake_case )
        if return_tensors is True or return_tensors == "pt":
            UpperCAmelCase_ :Any = torch.tensor(snake_case )
        return token_ids

    def snake_case_ ( self : Dict , snake_case : Union[int, List[int]] ):
        # Fast decode: delegate directly to SentencePiece.
        return self.sp_model.decode(snake_case )

    def snake_case_ ( self : Union[str, Any] , snake_case : "Conversation" ):
        """Build a "User: ... Bot: ..." prompt from a Conversation and encode it."""
        UpperCAmelCase_ :List[Any] = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        UpperCAmelCase_ :Union[str, Any] = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(snake_case ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=snake_case )
| 608 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a ( inductance: float, capacitance: float ):
    """Calculate the resonant frequency of an LC circuit.

    The resonant frequency is 1 / (2 * pi * sqrt(L * C)).

    Args:
        inductance: inductance L in henries; must be > 0.
        capacitance: capacitance C in farads; must be > 0.

    Returns:
        A ("Resonant frequency", frequency_in_hertz) tuple.

    Raises:
        ValueError: if either argument is zero or negative.

    The original signature declared both parameters as ``__snake_case`` — a
    duplicate-argument SyntaxError — while the body read ``inductance`` and
    ``capacitance``; the parameter names are restored here.
    """
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''' )
    if capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''' )
    return (
        "Resonant frequency",
        float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 608 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A__ ( __A ):
    """Tokenize one dataset example and record its character/token ratio.

    Args:
        __A: a dataset row with a ``"content"`` text field.

    Returns:
        dict with ``input_ids`` (token ids for the content, no truncation) and
        ``ratio_char_token`` (characters per produced token).

    The original body referenced ``example`` and ``output``, names lost to
    mangling (the three values were all bound to ``_lowerCamelCase`` and
    ``return output`` raised NameError); reconstructed here — key names follow
    the upstream pretokenization script, verify against downstream consumers.
    Uses the module-level ``tokenizer`` global defined below.
    """
    output = {}
    output["input_ids"] = tokenizer(__A["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(__A["content"]) / len(output["input_ids"])
    return output
# Script body: load a dataset, tokenize it in parallel, push the result to the Hub.
# NOTE(review): assignments below bind to the mangled name `lowerCAmelCase` while
# later statements read `parser`, `args`, `ds` and `t_start` — those references
# are NameErrors as written; the comments describe the apparent intent.
lowerCAmelCase : int =HfArgumentParser(PretokenizationArguments)
lowerCAmelCase : int =parser.parse_args()
if args.num_workers is None:
    # Default to one map worker per CPU core.
    lowerCAmelCase : Any =multiprocessing.cpu_count()
lowerCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Time the dataset load.
lowerCAmelCase : str =time.time()
lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="train")
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

# Tokenize in parallel, dropping all source-metadata columns.
lowerCAmelCase : Dict =time.time()
lowerCAmelCase : Dict =ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")

# Upload the tokenized dataset.
lowerCAmelCase : Tuple =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 15 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case ( __lowerCAmelCase ):
    """Dataset reader that builds a ``Dataset`` from raw text files via the
    packaged ``Text`` builder.

    NOTE(review): method parameters were mangled to repeated ``_UpperCamelCase``
    names (a duplicate-argument SyntaxError) — the comments describe the
    apparent upstream behavior (path(s), split, features, cache options).
    """

    def __init__( self : Dict , _UpperCamelCase : NestedDataStructureLike[PathLike] , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Optional[Features] = None , _UpperCamelCase : str = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[int] = None , **_UpperCamelCase : Tuple , ) ->Union[str, Any]:
        """Store reader options and construct the underlying Text builder."""
        super().__init__(
            _UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
        # Normalize a bare path / list of paths into a {split: paths} mapping.
        _lowerCamelCase : List[Any] = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase) else {self.split: path_or_paths}
        _lowerCamelCase : Any = Text(
            cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , **_UpperCamelCase , )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
        """Materialize the dataset: streaming view, or download-and-prepare then
        load as a map-style dataset."""
        if self.streaming:
            _lowerCamelCase : Tuple = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            _lowerCamelCase : List[Any] = None
            _lowerCamelCase : Any = None
            _lowerCamelCase : List[str] = None
            _lowerCamelCase : Dict = None
            self.builder.download_and_prepare(
                download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
            _lowerCamelCase : Optional[int] = self.builder.as_dataset(
                split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory)
        return dataset
| 15 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
    """Test helper: holds image shapes and produces the MobileViT image
    processor configuration used by the processor tests below."""

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=1_8 , _lowerCAmelCase=3_0 , _lowerCAmelCase=4_0_0 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , ):
        # NOTE(review): all parameters were mangled to `_lowerCAmelCase`
        # (duplicate-argument SyntaxError); the body reads the original names
        # (parent, batch_size, size, crop_size, ...).
        _lowercase : Optional[Any] = size if size is not None else {'shortest_edge': 2_0}
        _lowercase : List[str] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        _lowercase : Optional[Any] = parent
        _lowercase : List[Any] = batch_size
        _lowercase : Tuple = num_channels
        _lowercase : Optional[Any] = image_size
        _lowercase : str = min_resolution
        _lowercase : Optional[Any] = max_resolution
        _lowercase : Union[str, Any] = do_resize
        _lowercase : List[Any] = size
        _lowercase : List[str] = do_center_crop
        _lowercase : Tuple = crop_size
        _lowercase : List[Any] = do_flip_channel_order

    def __a ( self ):
        # Return the kwargs dict used to construct the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
    """Unit tests for ``MobileViTImageProcessor`` over PIL, numpy and torch inputs.

    NOTE(review): this class reuses the mangled name ``lowerCAmelCase_`` of the
    tester class above (shadowing it), and ``MobileViTImageProcessingTester``
    is not defined in this file — both look like artifacts of automated
    renaming; verify against the upstream test module.
    """
    _UpperCamelCase : Optional[int] = MobileViTImageProcessor if is_vision_available() else None

    def __a ( self ):
        # setUp: build the helper that supplies shapes and processor config.
        _lowercase : List[str] = MobileViTImageProcessingTester(self )

    @property
    def __a ( self ):
        # Processor construction kwargs, taken from the tester helper.
        return self.image_processor_tester.prepare_image_processor_dict()

    def __a ( self ):
        # The processor must expose all expected configuration attributes.
        _lowercase : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'center_crop' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_flip_channel_order' ) )

    def __a ( self ):
        # from_dict must honor defaults and accept size/crop_size overrides.
        _lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
        _lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )

    def __a ( self ):
        # Intentionally empty placeholder (kept from upstream).
        pass

    def __a ( self ):
        # PIL inputs: single image and batch must come out center-cropped.
        # Initialize image_processing
        _lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , Image.Image )
        # Test not batched input
        _lowercase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowercase : List[str] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def __a ( self ):
        # numpy inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        _lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , np.ndarray )
        # Test not batched input
        _lowercase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowercase : Union[str, Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def __a ( self ):
        # torch inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        _lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
        # Test not batched input
        _lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowercase : List[Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 66 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase_ ( config_path , display=False ):
    """Load an OmegaConf YAML config from `config_path`.

    Args:
        config_path: path to the YAML file.
        display: when True, pretty-print the config as YAML.

    Returns:
        The loaded OmegaConf config object.

    The original declared both parameters as ``UpperCamelCase_`` (a
    duplicate-argument SyntaxError); names restored from the body's usage.
    """
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def lowerCamelCase_ ( device , conf_path=None , ckpt_path=None ):
    """Build a VQModel from a YAML config and load a checkpoint onto `device`.

    Args:
        device: torch device for `map_location` and the final `.to(...)`.
        conf_path: YAML config path; defaults to the bundled vqgan_only.yaml.
        ckpt_path: checkpoint path; defaults to the bundled vqgan_only.pt.
            Lightning ``.ckpt`` files are unwrapped via their ``state_dict`` key.

    Returns:
        The loaded ``VQModel`` in its checkpointed state, moved to `device`.

    The original signature repeated ``UpperCamelCase_`` three times (a
    duplicate-argument SyntaxError); parameter names reconstructed from the
    body — confirm argument order against the upstream conversion script.
    """
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under 'state_dict'.
        sd = sd['''state_dict''']
    model.load_state_dict(sd , strict=True )
    model.to(device )
    # Free the (potentially large) state dict before returning.
    del sd
    return model
def lowerCamelCase_ ( x , model ):
    """Round-trip `x` through the VQGAN: encode to the latent grid, then decode.

    Args:
        x: input image batch tensor.
        model: object exposing ``encode(x) -> (z, emb_loss, info)`` and ``decode(z)``.

    Returns:
        The reconstruction ``model.decode(z)``.

    The original declared both parameters as ``UpperCamelCase_`` (a
    duplicate-argument SyntaxError) and referenced a free ``model`` name;
    reconstructed as (x, model) to match the body's usage.
    """
    z, _, _ = model.encode(x )
    # Log the spatial shape of the quantized latent for debugging.
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def lowerCamelCase_ ( string , reload=False ):
    """Resolve a dotted path like ``"package.module.Class"`` to the named object.

    Args:
        string: fully qualified ``module.attr`` path.
        reload: when True, re-import and reload the module first.

    Returns:
        The attribute (class/function/...) named by the path.

    Raises:
        ImportError / AttributeError: if the module or attribute is missing.

    The original signature repeated ``UpperCamelCase_`` (SyntaxError) while the
    body read the lost name ``string``; parameter names restored.
    """
    module, cls = string.rsplit('''.''' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def lowerCamelCase_ ( config ):
    """Instantiate the object described by a config mapping.

    Args:
        config: mapping with a required ``"target"`` dotted path and an
            optional ``"params"`` kwargs dict.

    Returns:
        ``get_obj_from_str(config["target"])(**config.get("params", {}))``.

    Raises:
        KeyError: if ``"target"`` is missing.

    The original parameter was mangled to ``UpperCamelCase_`` while the body
    read ``config``; the name is restored here.
    """
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''' )
    return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def lowerCamelCase_ ( config , sd , gpu=True , eval_mode=True ):
    """Instantiate a model from `config`, optionally load weights / move to GPU.

    Args:
        config: model config (consumed by ``instantiate_from_config``).
        sd: state dict to load, or None to keep the fresh initialization.
        gpu: move the model to CUDA when True.
        eval_mode: switch the model to eval() when True.

    Returns:
        ``{"model": model}`` (upstream convention).

    The original signature repeated ``UpperCamelCase_`` four times (a
    duplicate-argument SyntaxError); names reconstructed from the body.
    """
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def lowerCamelCase_ ( config , ckpt , gpu , eval_mode ):
    """Load a model plus its training global step from a Lightning checkpoint.

    Args:
        config: config object with a ``.model`` sub-config.
        ckpt: checkpoint path, or falsy to build an uninitialized model.
        gpu: forwarded to ``load_model_from_config``.
        eval_mode: forwarded to ``load_model_from_config``.

    Returns:
        ``(model, global_step)`` where global_step is None without a checkpoint.

    The original signature repeated ``UpperCamelCase_`` (SyntaxError) and the
    body read the lost names ``ckpt``/``config``; names reconstructed —
    confirm argument order against the upstream script.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='''cpu''' )
        global_step = pl_sd['''global_step''']
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=gpu , eval_mode=eval_mode )['''model''']
    return model, global_step
| 471 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Dict = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __UpperCAmelCase :
    """Model-tester helper for the Flax Pegasus tests: builds a tiny
    PegasusConfig with random inputs and verifies decoder cache behavior.

    NOTE(review): ``__init__``'s parameters were all mangled to ``_A`` (a
    duplicate-argument SyntaxError) while the body reads the original names
    (parent, batch_size, seq_length, ...); comments describe apparent intent.
    """

    lowercase : Dict = PegasusConfig
    lowercase : Dict = {}
    lowercase : List[str] = "gelu"

    def __init__( self , _A , _A=1_3 , _A=7 , _A=True , _A=False , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A=0.1 , _A=0.1 , _A=2_0 , _A=2 , _A=1 , _A=0 , ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE =parent
        _SCREAMING_SNAKE_CASE =batch_size
        _SCREAMING_SNAKE_CASE =seq_length
        _SCREAMING_SNAKE_CASE =is_training
        _SCREAMING_SNAKE_CASE =use_labels
        _SCREAMING_SNAKE_CASE =vocab_size
        _SCREAMING_SNAKE_CASE =hidden_size
        _SCREAMING_SNAKE_CASE =num_hidden_layers
        _SCREAMING_SNAKE_CASE =num_attention_heads
        _SCREAMING_SNAKE_CASE =intermediate_size
        _SCREAMING_SNAKE_CASE =hidden_dropout_prob
        _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE =max_position_embeddings
        _SCREAMING_SNAKE_CASE =eos_token_id
        _SCREAMING_SNAKE_CASE =pad_token_id
        _SCREAMING_SNAKE_CASE =bos_token_id

    def UpperCamelCase_ ( self ):
        """Build a small PegasusConfig plus random encoder/decoder inputs,
        forcing an EOS token at the end of every encoder sequence."""
        _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        _SCREAMING_SNAKE_CASE =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        _SCREAMING_SNAKE_CASE =np.concatenate([input_ids, eos_tensor] , axis=1 )
        _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _SCREAMING_SNAKE_CASE =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _SCREAMING_SNAKE_CASE =prepare_pegasus_inputs_dict(_A , _A , _A )
        return config, inputs_dict

    def UpperCamelCase_ ( self , _A , _A , _A ):
        """Check that incremental decoding with an init'd cache matches a full
        forward pass (max diff < 1e-3 on the last-step logits)."""
        _SCREAMING_SNAKE_CASE =2_0
        _SCREAMING_SNAKE_CASE =model_class_name(_A )
        _SCREAMING_SNAKE_CASE =model.encode(inputs_dict['''input_ids'''] )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        _SCREAMING_SNAKE_CASE =model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        _SCREAMING_SNAKE_CASE =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        _SCREAMING_SNAKE_CASE =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all but the last token with a fresh cache...
        _SCREAMING_SNAKE_CASE =model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        _SCREAMING_SNAKE_CASE =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        # ...then the last token reusing the populated cache.
        _SCREAMING_SNAKE_CASE =model.decode(
            decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
        _SCREAMING_SNAKE_CASE =model.decode(_A , _A )
        _SCREAMING_SNAKE_CASE =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )

    def UpperCamelCase_ ( self , _A , _A , _A ):
        """Same cache-consistency check, but with an explicit (padded) decoder
        attention mask extended to the full cache length."""
        _SCREAMING_SNAKE_CASE =2_0
        _SCREAMING_SNAKE_CASE =model_class_name(_A )
        _SCREAMING_SNAKE_CASE =model.encode(inputs_dict['''input_ids'''] )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # Pad the decoder attention mask out to max_decoder_length with zeros.
        _SCREAMING_SNAKE_CASE =jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        _SCREAMING_SNAKE_CASE =model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        _SCREAMING_SNAKE_CASE =jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        _SCREAMING_SNAKE_CASE =model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        _SCREAMING_SNAKE_CASE =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _SCREAMING_SNAKE_CASE =model.decode(
            decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
        _SCREAMING_SNAKE_CASE =model.decode(_A , _A , decoder_attention_mask=_A )
        _SCREAMING_SNAKE_CASE =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def _lowerCAmelCase(a : Optional[int] , a : Union[str, Any] , a : Union[str, Any] , a : List[str]=None , a : int=None , ) -> Dict:
if attention_mask is None:
_SCREAMING_SNAKE_CASE =np.not_equal(a , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __UpperCAmelCase ( _lowerCamelCase, unittest.TestCase ):
'''simple docstring'''
lowercase : Union[str, Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase : Any = True
lowercase : Any = False
lowercase : Union[str, Any] = False
lowercase : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =FlaxPegasusModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE =self._prepare_for_class(_A , _A )
_SCREAMING_SNAKE_CASE =model_class(_A )
@jax.jit
def encode_jitted(_A , _A=None , **_A ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
_SCREAMING_SNAKE_CASE =encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE =encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_SCREAMING_SNAKE_CASE ={
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_A , _A , _A ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
_SCREAMING_SNAKE_CASE =decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE =decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_A )
_SCREAMING_SNAKE_CASE =np.ones((1, 1) )
_SCREAMING_SNAKE_CASE =model(_A )
self.assertIsNotNone(_A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
_SCREAMING_SNAKE_CASE =PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
_SCREAMING_SNAKE_CASE =[
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_SCREAMING_SNAKE_CASE =[
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_SCREAMING_SNAKE_CASE =tokenizer(_A , return_tensors='''np''' , truncation=_A , max_length=5_1_2 , padding=_A )
_SCREAMING_SNAKE_CASE =model.generate(**_A , num_beams=2 ).sequences
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_A , skip_special_tokens=_A )
assert tgt_text == decoded
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ : str = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 165 | 1 |
"""In-place LSD (least-significant-digit) radix sort for non-negative integers."""
from __future__ import annotations

# Number base used to bucket digits (decimal digits -> 10 buckets).
RADIX = 10
A__ : int = RADIX  # kept for backward compatibility with the original constant name


def UpperCAmelCase__(UpperCAmelCase_: list[int]) -> list[int]:
    """Sort *UpperCAmelCase_* (non-negative ints) ascending, in place, and return it.

    The obfuscated version referenced the undefined name ``RADIX``, called
    ``range()`` on the input list, and wrote results to throwaway locals;
    all restored here. An empty list is returned unchanged (``max`` would raise).
    """
    list_of_ints = UpperCAmelCase_
    if not list_of_ints:
        return list_of_ints

    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per possible digit
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for value in list_of_ints:
            digit = int((value / placement) % RADIX)
            buckets[digit].append(value)
        # put each bucket's contents back into list_of_ints (stable within a pass)
        write_index = 0
        for bucket in buckets:
            for value in bucket:
                list_of_ints[write_index] = value
                write_index += 1
        # move to the next digit position
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 13 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase_(SchedulerCommonTest):
    """Scheduler-suite tests for DPMSolverSDEScheduler (seeded SDE sampler).

    Restores the upstream diffusers test: the obfuscated version inherited from
    the undefined name ``snake_case__``, named every method ``__a`` (so each
    definition shadowed the previous one), and never defined the
    ``scheduler_classes`` / ``num_inference_steps`` attributes that the bodies
    and ``SchedulerCommonTest`` rely on.
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full denoising loop and pin per-device output statistics."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but with timesteps placed on the target device."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            residual = model(sample, t)
            output = scheduler.step(residual, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 364 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import bootstrap for the ViViT video model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Submodule name -> public symbols. The obfuscated version bound three separate
# structures to the same name and then passed an undefined `_import_structure`
# to _LazyModule; restored here.
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__(unittest.TestCase):
    """Integration test for the Flax XLM-RoBERTa base checkpoint.

    The obfuscated version fed the undefined name ``UpperCamelCase_`` to the
    tokenizer, model, and assertions; the intermediate values are restored
    under descriptive names.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 4 | 1 |
"""simple docstring"""
from math import isqrt
def lowercase ( a__ : Optional[int] ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase_ ) + 1 ) )
def lowercase ( a__ : int = 10**6 ) -> int:
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 420 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase(ProcessorMixin):
    """Bundles a FLAVA image processor and a BERT tokenizer into one processor.

    Restores the upstream FLAVA processor: the obfuscated version inherited from
    the undefined name ``snake_case__``, bound all three required class
    attributes to one name, named every method ``__lowerCAmelCase`` (each
    shadowing the previous), and lost the ``encoding``/``image_features``
    intermediates to a single reused local.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated keyword as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Forward *text* to the tokenizer and *images* to the image processor; merge the outputs."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of both components' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 606 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both bindings below share the name `snake_case__`, so the logger
# reference is immediately overwritten by the archive map — presumably these
# were `logger` and `FNET_PRETRAINED_CONFIG_ARCHIVE_MAP` before obfuscation;
# confirm against callers before renaming.
snake_case__ = logging.get_logger(__name__)
# Map of canonical FNet checkpoint names to their hosted config URLs.
snake_case__ = {
    '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
    '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for FNet models (Fourier-transform token mixing, no attention).

    The obfuscated version named every ``__init__`` parameter ``__A`` — a
    SyntaxError (duplicate argument) — and inherited from the undefined name
    ``_a``; the real parameter names, base class, and ``model_type`` are restored.
    """

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific options: pad/truncate sequences to tpu_short_seq_length
        # when Fourier optimizations are enabled.
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 373 |
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by distance, tracking each vertex's index for Prim's algorithm.

    Restores the TheAlgorithms implementation: the obfuscated version named
    every method ``_snake_case`` (each shadowing the previous) while callers
    use ``get_position``/``set_position``/``top_to_bottom``/``bottom_to_top``/
    ``heapify``/``delete_minimum``, and it lost the ``self.node_position``
    attribute and all heap writes to throwaway locals.
    """

    def __init__(self):
        # node_position[vertex] -> current index of that vertex in the heap array
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding *vertex*."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that *vertex* now lives at heap index *pos*."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at *start* down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            # keep the vertex -> index map in sync with the swap
            temp = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift *val* (for the vertex at *index*) up toward the root."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # reached the root without breaking: val becomes the new minimum
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over *heap* / *positions*."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the minimum key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel: pushed down by the sift below
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the minimum-spanning-tree edges of *adjacency_list* via Prim's algorithm.

    *adjacency_list* maps each vertex ``v`` (0..n-1) to pairs ``[neighbor, weight]``.
    Returns a list of ``(parent, vertex)`` tuples in the order vertices join the tree.
    (The obfuscated version was named ``lowerCamelCase__`` although the module's
    main guard calls ``prisms_algorithm``; the original name is restored.)
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # neighboring tree vertex of each selected vertex
    # minimum distance of each unexplored vertex to the partial tree built so far
    distance_tv = []  # heap of distances of vertices from their neighboring tree vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
snake_case__ = int(input('''Enter number of edges: ''').strip())
snake_case__ = defaultdict(list)
for _ in range(edges_number):
snake_case__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 373 | 1 |
'''simple docstring'''
from collections import deque
class Process:
    """A schedulable process with the bookkeeping fields used by the MLFQ simulator.

    The obfuscated version named both this class and MLFQ ``_UpperCamelCase``
    (the second shadowed the first) and lost every attribute to a reused local;
    the real name and instance attributes are restored.
    """

    def __init__(self, process_name: str, arrival_time: int, burst_time: int):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of a finished process, or the last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler simulator.

    The first ``number_of_queues - 1`` levels run round-robin with their
    respective time slices; the final level runs first-come-first-served.
    (Restores the TheAlgorithms implementation: the obfuscated version named
    every method ``__lowerCamelCase`` — each shadowing the previous — and lost
    all attribute writes to a reused local.)
    """

    def __init__(self, number_of_queues: int, time_slices: list, queue, current_time: int):
        self.number_of_queues = number_of_queues
        # time slices of the queues that the round-robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes live in this ready queue
        self.ready_queue = queue
        # current simulated time
        self.current_time = current_time
        # finished processes end up in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        """Return the names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        """Return each process's accumulated waiting time."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        """Return each process's turnaround time (arrival -> completion)."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        """Return each process's completion (stop) time."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        """Return each process's remaining burst time."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        """Credit *process* with the time it waited since it was last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        """Run every process in *ready_queue* to completion, FCFS order."""
        finished = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, advance the clock
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of the current process
            self.update_waiting_time(cp)
            # run to completion
            self.current_time += cp.burst_time
            # finish the process: zero its remaining burst time
            cp.burst_time = 0
            # turnaround time = completion time - arrival time
            cp.turnaround_time = self.current_time - cp.arrival_time
            # completion time
            cp.stop_time = self.current_time
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to the finish queue
        # FCFS finishes all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice: int):
        """Give each process one *time_slice*; requeue anything unfinished."""
        finished = deque()  # sequence deque of terminated processes
        # just one cycle: unfinished processes go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than the current time, advance the clock
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of the unfinished process
            self.update_waiting_time(cp)
            if cp.burst_time > time_slice:
                # use the CPU for only one time slice
                self.current_time += time_slice
                cp.burst_time -= time_slice
                cp.stop_time = self.current_time
                # place the process at the back of the queue: it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                cp.burst_time = 0
                cp.stop_time = self.current_time
                cp.turnaround_time = self.current_time - cp.arrival_time
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to the finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        """Run the full MLFQ schedule and return the finish queue."""
        # all queues except the last use round robin with their time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue uses the first-come-first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCamelCase = Process("""P1""", 0, 53)
lowerCamelCase = Process("""P2""", 0, 17)
lowerCamelCase = Process("""P3""", 0, 68)
lowerCamelCase = Process("""P4""", 0, 24)
lowerCamelCase = 3
lowerCamelCase = [17, 25]
lowerCamelCase = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
lowerCamelCase = Process("""P1""", 0, 53)
lowerCamelCase = Process("""P2""", 0, 17)
lowerCamelCase = Process("""P3""", 0, 68)
lowerCamelCase = Process("""P4""", 0, 24)
lowerCamelCase = 3
lowerCamelCase = [17, 25]
lowerCamelCase = deque([Pa, Pa, Pa, Pa])
lowerCamelCase = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 474 |
"""X-MOD model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for the released X-MOD models.
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class _UpperCamelCase ( PretrainedConfig ):
    """
    Configuration for an X-MOD model.

    Mirrors the standard RoBERTa-style configuration and adds the
    X-MOD-specific adapter / language settings.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        """See ``PretrainedConfig`` for the common arguments; the adapter_*
        and language arguments are X-MOD specific."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Normalize to a plain list so the config serializes consistently.
        self.languages = list(languages)
        self.default_language = default_language
class _UpperCamelCase ( OnnxConfig ):
    """ONNX export configuration for X-MOD models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis spec for each ONNX input: axis index -> symbolic axis name."""
        if self.task == "multiple-choice":
            # Multiple choice carries an extra "choice" axis between batch and sequence.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 474 | 1 |
"""Tests for the ONNX Stable Diffusion text-to-image pipeline."""
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin

# onnxruntime is an optional dependency; only import it when present.
if is_onnx_available():
    import onnxruntime as ort
class _snake_case ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """Fast CPU-only tests for ``OnnxStableDiffusionPipeline`` on a tiny hub checkpoint."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic keyword arguments for a cheap pipeline call."""
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        # Uses the scheduler shipped with the checkpoint.
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        # skip_prk_steps keeps the run short and matches pipeline expectations.
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        """Passing precomputed prompt embeddings must match passing the prompt."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        """Same equivalence check for negative prompt embeddings."""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    """Nightly GPU integration tests for ``OnnxStableDiffusionPipeline``."""

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA provider with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        """The per-step callback must fire once per step and see sane latents."""
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        # 5 denoising steps + the initial latents call back 6 times total.
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 514 |
"""Lazy import structure for the Informer model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Public objects exposed by this package, keyed by submodule.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling code is only importable when torch is installed.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 514 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Without sentencepiece the fast tokenizer has no slow counterpart.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Hosted vocab/tokenizer files for the released BigBird checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

# SentencePiece's word-initial marker character.
SPIECE_UNDERLINE = "▁"
class lowerCamelCase_ ( PreTrainedTokenizerFast ):
    """
    Fast (HuggingFace *tokenizers*-backed) BigBird tokenizer.

    Based on SentencePiece/Unigram; mirrors the slow ``BigBirdTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Plain strings become AddedToken instances with no space stripping.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 167 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(TestCasePlus):
    """End-to-end fine-tuning smoke test for an encoder-decoder summarization model."""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        """Fine-tune a tiny bert2bert model on 1% of CNN/DailyMail."""
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        # Wire up generation-relevant config from the encoder / tokenizer.
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        # Keep the run cheap: a handful of examples is enough for a smoke test.
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding positions so they are ignored by the loss.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(SchedulerCommonTest):
    """Unit tests for ``CMStochasticIterativeScheduler``."""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default consistency-model scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]  # not strictly decreasing

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 81 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Module-level constants for the LED fast tokenizer.
# The obfuscated version assigned all four values to the SAME name
# (`SCREAMING_SNAKE_CASE__`), each assignment clobbering the previous one,
# while the class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

# Expected file names for the slow/fast tokenizer vocab artefacts.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download URLs for the pretrained checkpoint's vocab files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """Fast (Rust-backed) LED tokenizer, a byte-level BPE derived from GPT-2/BART.

    The obfuscated original was unloadable: every method parameter was named
    ``A_`` (duplicate-argument SyntaxError), every method was named
    ``__snake_case`` (each definition shadowed the previous), and the base
    class was the undefined name ``A``.  This restores the canonical
    structure; all runtime strings are preserved byte-for-byte.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if the requested `add_prefix_space` differs
        # from the serialized one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """Mask token string, or None (with a logged error) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True / rstrip=False: behave like a normal word that eats the
        # space before it — TODO confirm against the slow tokenizer's default.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pretokenized input unless `add_prefix_space=True` was used."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Single-sequence variant of the pretokenized-input guard above."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Wrap sequences as `<s> A </s>` or `<s> A </s></s> B </s>`."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like BART/RoBERTa) does not use token types: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Standard padding, plus padding of LED's `global_attention_mask`."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 643 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _SCREAMING_SNAKE_CASE(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (CPU-capable) tests for the DeepFloyd IF text-to-image pipeline.

    The obfuscated original had duplicate ``A_`` parameters (SyntaxError), an
    undefined base class ``A``, and every method collapsed onto one name, so
    neither the tester mixin hooks nor unittest discovery could work.
    """

    # Attribute names restored so PipelineTesterMixin can read them.
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        # Shared IF component factory lives on IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Slow GPU integration tests for the DeepFloyd IF pipeline family.

    NOTE(review): this class shares its mangled name with the fast-test class
    above, so the earlier definition is shadowed at import time — confirm the
    intended (distinct) class names upstream.
    The obfuscated original collapsed ``pipe_1``/``pipe_2`` into one name and
    every method onto ``__snake_case``, so the ``self._test_if*`` calls could
    never resolve; this restores the two-stage structure.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components)
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_imgaimg(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    """Reset CUDA memory statistics so a subsequent max_memory_allocated() read is fresh.

    The call sites in this file use this name, but the definition had been
    mangled to ``a__``; the alias below keeps the mangled name importable.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


# Backward-compatible alias for the previously-mangled name.
a__ = _start_torch_memory_measurement
| 643 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch/cuda kernels so the pipeline tests are reproducible.
enable_full_determinism()
class a(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Stable Diffusion InstructPix2Pix pipeline.

    NOTE(review): the class name collides with the slow-test class below
    (both were mangled to ``a``) — confirm the intended distinct names.
    The obfuscated original was unloadable (duplicate ``lowerCAmelCase``
    parameters, one shared method name, undefined base classes,
    ``np.uinta``/``np.floataa`` typos); this restores it.
    """

    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/generator inputs for the pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pixapix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pixapix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pixapix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Rebuild the PIL image as a normalized torch batch of 2.
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pixapix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in rounded_slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the raw image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[Any]=0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
SCREAMING_SNAKE_CASE_: int ={
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: Dict =self.get_inputs()
SCREAMING_SNAKE_CASE_: str =pipe(**lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: Optional[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: str =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: List[Any] =self.get_inputs()
SCREAMING_SNAKE_CASE_: Tuple =pipe(**lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: Union[str, Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: str =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: Dict =self.get_inputs()
SCREAMING_SNAKE_CASE_: Any =pipe(**lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Any =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =0
def callback_fn(lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE_: str =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE_: List[str] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_: str =latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Tuple =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
SCREAMING_SNAKE_CASE_: int =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_: Union[str, Any] =latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[str] =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
SCREAMING_SNAKE_CASE_: Any =False
SCREAMING_SNAKE_CASE_: int =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_: List[str] =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: int =self.get_inputs()
pipe(**lowerCAmelCase , callback=lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Any =self.get_inputs()
SCREAMING_SNAKE_CASE_: Dict =pipe(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
    def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
        """Run the pipeline on a 504x504 image (divisible by 8 but not 16/32) and
        compare an output slice against reference values."""
        SCREAMING_SNAKE_CASE_: Optional[int] =self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        SCREAMING_SNAKE_CASE_: Optional[Any] =inputs["""image"""].resize((504, 504) )
        SCREAMING_SNAKE_CASE_: List[str] ="""timbrooks/instruct-pix2pix"""
        # NOTE(review): `lowerCAmelCase`, `inputs`, `pipe`, `output`, `image`,
        # `image_slice`, `expected_slice` are unresolved here — the obfuscation bound
        # every local to SCREAMING_SNAKE_CASE_; restore distinct names before running.
        SCREAMING_SNAKE_CASE_: str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowerCAmelCase , safety_checker=lowerCAmelCase , )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        pipe.enable_attention_slicing()
        SCREAMING_SNAKE_CASE_: int =pipe(**lowerCAmelCase )
        SCREAMING_SNAKE_CASE_: int =output.images[0]
        # Slice a small interior window of the output image for comparison.
        SCREAMING_SNAKE_CASE_: Optional[int] =image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        SCREAMING_SNAKE_CASE_: List[str] =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 720 |
"""simple docstring"""
def __magic_name__(lowercase):
    """Return the Catalan numbers C(0)..C(lowercase) computed by dynamic programming.

    Args:
        lowercase: upper limit (inclusive) of the sequence; must be >= 0.

    Returns:
        list[int]: Catalan numbers from index 0 through ``lowercase``.

    Raises:
        ValueError: if ``lowercase`` is negative.
    """
    # Bug fix: the body referenced an undefined `upper_limit` / `catalan_list`
    # and the inner loop iterated over the full limit instead of `i`.
    if lowercase < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")

    catalan_list = [0] * (lowercase + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if lowercase > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
    for i in range(2, lowercase + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the sequence.
    # Bug fix: the loop referenced undefined names `N` (input was stored in a
    # different variable) and `catalan_numbers` (the function is `__magic_name__`).
    print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
    print("""\n*** Enter -1 at any time to quit ***""")
    print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("""\n********* Goodbye!! ************""")
                break
            else:
                print(f"""The Catalan numbers from 0 through {N} are:""")
                print(__magic_name__(N))
                print("""Try another upper limit for the sequence: """, end="""""")
    except (NameError, ValueError):
        print("""\n********* Invalid input, goodbye! ************\n""")

    import doctest

    doctest.testmod()
| 36 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCAmelCase__(config_path, display=False):
    """Load an OmegaConf YAML configuration file.

    Args:
        config_path: path to the YAML config file.
        display: when True, pretty-print the loaded config as YAML.

    Returns:
        The loaded OmegaConf config object.
    """
    # Bug fix: the original signature declared `__snake_case` twice (a SyntaxError)
    # and the body referenced an undefined `config`.
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def lowerCAmelCase__(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config file and load a checkpoint onto `device`.

    Args:
        device: torch device the model (and checkpoint tensors) are mapped to.
        conf_path: optional path to the YAML config; defaults to the local
            ``vqgan_only.yaml`` checkpoint config.
        ckpt_path: optional path to the weights; defaults to ``vqgan_only.pt``.

    Returns:
        The VQModel with weights loaded, moved to `device`.
    """
    # Bug fix: duplicate `__snake_case` parameters (SyntaxError) and undefined
    # locals `config`, `model`, `sd` — restored distinct names.
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    # NOTE(review): strict loading assumed here — the original flag was lost in renaming.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the checkpoint dict before returning
    return model
def lowerCAmelCase__(x, model):
    """Encode `x` with the VQGAN, print the latent shape, and decode it back.

    Args:
        x: input image batch tensor.
        model: a VQModel exposing ``encode``/``decode``.

    Returns:
        The reconstructed batch produced by ``model.decode``.
    """
    # Bug fix: duplicate parameters and undefined `z`/`xrec` locals.
    z, _, [_, _, indices] = model.encode(x)
    print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}')
    xrec = model.decode(z)
    return xrec
def lowerCAmelCase__(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.ClassName"`` to the named object.

    Args:
        string: fully qualified dotted name of the target object.
        reload: when True, re-import the containing module first.

    Returns:
        The attribute named by the last path component.
    """
    # Bug fix: duplicate `__snake_case` parameters (SyntaxError) and undefined
    # locals `module`/`cls` — restored the conventional implementation.
    module, cls = string.rsplit('''.''', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def lowerCAmelCase__(config):
    """Instantiate the object described by a config mapping.

    The config must contain a ``"target"`` dotted path; optional ``"params"``
    are forwarded as keyword arguments to the constructor.

    Raises:
        KeyError: if ``"target"`` is missing from the config.
    """
    # Bug fix: the parameter was named `__snake_case` while the body used `config`.
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''')
    return get_obj_from_str(config['''target'''])(**config.get('''params''', {}))
def lowerCAmelCase__(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from `config` and optionally load weights / move to GPU.

    Args:
        config: config mapping understood by ``instantiate_from_config``.
        sd: state dict to load, or None to keep fresh weights.
        gpu: move the model to CUDA when True.
        eval_mode: switch the model to eval() when True.

    Returns:
        dict: ``{"model": model}``.
    """
    # Bug fix: four parameters all named `__snake_case` (SyntaxError) and an
    # undefined `model` local — restored distinct names.
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def lowerCAmelCase__(config, ckpt, gpu, eval_mode):
    """Load a model plus its training global step from a Lightning checkpoint.

    Args:
        config: config object whose ``.model`` section describes the network.
        ckpt: checkpoint path, or a falsy value to build an uninitialized model.
        gpu: forwarded to ``load_model_from_config``.
        eval_mode: forwarded to ``load_model_from_config``.

    Returns:
        tuple: ``(model, global_step)`` — global_step is None without a checkpoint.
    """
    # Bug fix: four duplicate `__snake_case` parameters (SyntaxError) and
    # undefined locals `pl_sd`/`global_step`/`model`.
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='''cpu''')
        global_step = pl_sd['''global_step''']
        print(F'loaded model from global step {global_step}.')
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['''state_dict'''], gpu=gpu, eval_mode=eval_mode)['''model''']
    return model, global_step
| 481 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A(unittest.TestCase):
    """Round-trip tests for BetterTransformer conversion on a tiny random T5 model.

    Bug fix: the original bodies referenced an undefined `__lowerCAmelCase` and
    bound every local to `lowerCamelCase__`; distinct names are restored below.
    NOTE(review): both test methods share the name `__lowerCamelCase`, so only the
    second definition survives at class-creation time — presumably they had
    distinct `test_*` names originally; verify against upstream.
    """

    def __lowerCamelCase(self):
        """Convert -> generate -> revert -> save -> reload must preserve generations."""
        model_id = '''hf-internal-testing/tiny-random-t5'''
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inputs = tokenizer('''This is me''', return_tensors='''pt''')
        model = model.to_bettertransformer()
        # after conversion at least one submodule is a BetterTransformer layer
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inputs)
        model = model.reverse_bettertransformer()
        # after reversal no BetterTransformer layer remains
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inputs)
            # the reloaded vanilla model must generate identical tokens
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def __lowerCamelCase(self):
        """Saving a converted model must raise; after reverting, saving succeeds."""
        model_id = '''hf-internal-testing/tiny-random-t5'''
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): the expected exception type was lost in renaming;
            # transformers raises ValueError when saving a converted model — confirm.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 481 | 1 |
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE = tuple[int, int]
class __UpperCAmelCase:
    """Min-priority queue over hashable items, backed by ``heapq`` plus a set
    for O(1) membership tests.

    Bug fixes relative to the obfuscated original: every method was named
    ``snake_case_`` (so only the last one survived) while the call sites below
    use ``minkey``/``empty``/``put``/``remove_element``/``top_show``/``get``;
    tuple unpacks were collapsed to a single name; ``item`` was never added to
    the set. Restored from the conventional implementation.
    """

    def __init__(self):
        self.elements = []  # heap of (priority, item) pairs
        self.set = set()    # items currently queued

    def minkey(self):
        """Smallest priority currently queued, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("""inf""")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert `item`, or update its priority if already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until `item` is found, re-push with new priority
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove `item` from the queue if present."""
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority, without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


# The A* driver below instantiates the queue under this name.
PriorityQueue = __UpperCAmelCase
def a(p, goal):
    """Euclidean distance between grid cells `p` and `goal` (consistent heuristic)."""
    # Bug fix: duplicate parameter names (SyntaxError) and undefined `_A`/`a`/`b`.
    vec_p = np.array(p)
    vec_goal = np.array(goal)
    return np.linalg.norm(vec_p - vec_goal)
def a(p, goal):
    """Inadmissible heuristic: Euclidean distance floor-divided by the global
    step counter ``t`` (grows weaker as the search proceeds)."""
    # Bug fix: duplicate parameter names (SyntaxError) and undefined `_A`.
    # NOTE(review): relies on module globals `consistent_heuristic` and `t`,
    # whose definitions were clobbered by the mechanical renaming above.
    return consistent_heuristic(p, goal) // t
def a(p, goal):
    """Manhattan distance between grid cells `p` and `goal`."""
    # Bug fix: duplicate parameter names (SyntaxError); body already used p/goal.
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def a(start, i, goal, g_function):
    """Priority key: g(start) + Wa * h_i(start, goal).

    NOTE(review): depends on module globals `Wa` and `heuristics`, whose
    definitions were clobbered by the mechanical renaming above.
    """
    # Bug fix: four duplicate parameter names (SyntaxError) and undefined refs.
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    """Print the grid with the found path and exit.

    NOTE(review): the three parameters share one name (a SyntaxError) and the
    grid-cell assignments lost their subscripts during mechanical renaming —
    every `__a = ...` below originally wrote into `grid[...]` or named locals
    (`grid`, `x`, `x_c`, `y_c`); the original signature was presumably
    (back_pointer, goal, start). Restore before use.
    """
    # n x n character grid, '*' = free, '#' = obstacle, '-' = path
    __a = np.chararray((n, n) )
    for i in range(_A ):
        for j in range(_A ):
            __a = "*"
    for i in range(_A ):
        for j in range(_A ):
            # mark obstacle cells (grid rows are flipped vertically for display)
            if (j, (n - 1) - i) in blocks:
                __a = "#"
    __a = "-"
    # walk the back-pointer chain from goal to start, marking the path
    __a = back_pointer[goal]
    while x != start:
        (__a) = x
        # print(x)
        __a = "-"
        __a = back_pointer[x]
    __a = "-"
    for i in range(_A ):
        for j in range(_A ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=""" """ )
                print("""<-- End position""" , end=""" """ )
            else:
                print(grid[i][j] , end=""" """ )
        print()
    print("""^""" )
    print("""Start position""" )
    print()
    print("""# is an obstacle""" )
    print("""- is the path taken by algorithm""" )
    print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    # print the path cells in goal -> start order
    __a = back_pointer[goal]
    while x != start:
        print(_A , end=""" """ )
        __a = back_pointer[x]
    print(_A )
    sys.exit()
def a(p):
    """Return True when grid cell `p` lies inside the n x n board.

    NOTE(review): depends on the module global `n` (grid size), whose
    definition was clobbered by the mechanical renaming above.
    """
    # Bug fix: the parameter was named `lowerCAmelCase__` while the body used `p`.
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def a(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand node `s` from search `j`, relaxing its four grid neighbours.

    Bug fix: all eight parameters shared the name `lowerCAmelCase__`
    (a SyntaxError) and every reference was collapsed to `_A`; the
    conventional multi-heuristic A* signature and body are restored.
    NOTE(review): depends on module globals `n_heuristic`, `blocks`, `goal`,
    `Wa` and on the sibling functions `valid`/`key`, whose definitions were
    clobbered by the mechanical renaming above.
    """
    # remove `s` from every open list before expanding it
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("""inf""")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                # found a shorter path to this neighbour
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def a():
    """Return the obstacle cells of the fixed 20x20 test grid.

    The layout is the union of several rectangles plus a short wall segment;
    overlapping regions produce duplicate entries, exactly as in the original.
    """
    # rectangular block near the origin
    cells = [(x, y) for x in range(1, 5) for y in range(1, 6)]
    # short horizontal wall at y == 17
    cells.extend((x, 17) for x in range(15, 20))
    # large central rectangle
    cells.extend((x, y) for x in range(10, 19) for y in range(1, 15))
    # L block: vertical part ...
    cells.extend((x, y) for x in range(1, 4) for y in range(12, 19))
    # ... and horizontal part
    cells.extend((x, y) for x in range(3, 13) for y in range(16, 19))
    return cells
# NOTE(review): every assignment below rebinds the single name
# SCREAMING_SNAKE_CASE, clobbering the previous value — these were originally
# distinct globals (heuristics, list_blocks, blocks_blk, blocks, W1, W2, n,
# n_heuristic, start, goal, t) that the functions above rely on; also
# `consistent_heuristic`, `heuristic_a` and `make_common_ground` are not
# defined under those names in this file. Restore distinct names before use.
# heuristic dispatch table: index -> heuristic function
SCREAMING_SNAKE_CASE = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
# a vertical strip of obstacle candidates at y == 1
SCREAMING_SNAKE_CASE = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (1_0, 1),
    (1_1, 1),
    (1_2, 1),
    (1_3, 1),
    (1_4, 1),
    (1_5, 1),
    (1_6, 1),
    (1_7, 1),
    (1_8, 1),
    (1_9, 1),
]
SCREAMING_SNAKE_CASE = make_common_ground()
SCREAMING_SNAKE_CASE = blocks_blk
# hyper parameters
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2_0
SCREAMING_SNAKE_CASE = 3  # one consistent and two other inconsistent
# start and end destination
SCREAMING_SNAKE_CASE = (0, 0)
SCREAMING_SNAKE_CASE = (n - 1, n - 1)
SCREAMING_SNAKE_CASE = 1
def a(start, goal, n_heuristic):
    """Multi-heuristic A*: run one anchor (consistent) search plus n_heuristic-1
    inadmissible searches over a shared g-function, printing the grid when no
    path is found (a found path prints and exits inside `do_something`).

    Bug fix: the three parameters shared one name (a SyntaxError) and the body
    referenced the undefined `_A`; the conventional implementation is restored.
    NOTE(review): depends on module globals `Wa`, `t`, `n`, `blocks` and on
    sibling functions `key`/`do_something`/`expand_state`, whose names were
    clobbered by the mechanical renaming above.
    """
    g_function = {start: 0, goal: float("""inf""")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    # one priority queue per heuristic, each seeded with the start node
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("""inf"""):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("""inf"""):
                        do_something(back_pointer, goal, start)
                else:
                    # expand the best node of the i-th inadmissible search
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("""inf"""):
                        do_something(back_pointer, goal, start)
                    else:
                        # expand the best node of the anchor search
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    # render the grid top-to-bottom: '#' obstacle, '-' visited, '*' untouched
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
    # NOTE(review): `multi_a_star`, `start`, `goal` and `n_heuristic` are not
    # defined under these names above — the function is named `a` and the
    # globals were all rebound to SCREAMING_SNAKE_CASE; restore before running.
    multi_a_star(start, goal, n_heuristic)
| 705 |
import doctest
from collections import deque
import numpy as np
class __UpperCAmelCase:
    """Circular convolution of two discrete-time signals via the matrix method.

    Bug fix: the obfuscated original bound every local to `__a` and then
    referenced undefined names (`rotated_signal`, `matrix`, `final_signal`,
    `__A`); the conventional implementation is restored.
    """

    def __init__(self):
        # default example signals; the shorter one is zero-padded before convolving
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def snake_case_(self):
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # row i of the matrix is the second signal rotated right by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
    # run the module's doctests when executed directly
    doctest.testmod()
| 209 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __magic_name__(unittest.TestCase):
    """Checks that an optimizer wrapped by `accelerate` remains picklable."""

    def __a(self):
        # Bug fix: the body referenced an undefined `_a` and rebound every local
        # to `lowerCAmelCase_`; the return annotation also named an unimported
        # `List` (NameError at class creation) and was dropped.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # a prepared optimizer must survive a pickle round-trip
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # reset the process-wide accelerator singleton so other tests start clean
        AcceleratorState._reset_state()
| 122 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
    """Builds tiny Mask2Former configs and random inputs for the tests below.

    NOTE(review): the signatures declare the name `_a` repeatedly (duplicate
    argument names are a SyntaxError) and every local is bound to
    `lowerCAmelCase_`, so later references (`parent`, `config`, `model`, ...)
    are unresolved — the file was mechanically renamed; restore distinct
    names before use. All methods also share the name `__a`, so only the last
    definition survives at class-creation time.
    """
    def __init__( self , _a , _a=2 , _a=True , _a=False , _a=10 , _a=3 , _a=32 * 8 , _a=32 * 8 , _a=4 , _a=64 , ) -> List[str]:
        # store the test hyper-parameters on the tester instance
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_auxiliary_loss
        lowerCAmelCase_ = num_queries
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = min_size
        lowerCAmelCase_ = max_size
        lowerCAmelCase_ = num_labels
        lowerCAmelCase_ = hidden_dim
        lowerCAmelCase_ = hidden_dim
    def __a ( self ) -> List[Any]:
        # random pixel values, full pixel mask, random binary mask labels and class labels
        lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _a )
        lowerCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
        lowerCAmelCase_ = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
        ).float()
        lowerCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
        lowerCAmelCase_ = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def __a ( self ) -> int:
        # tiny config so the model builds and runs quickly in CI
        lowerCAmelCase_ = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        lowerCAmelCase_ = self.num_queries
        lowerCAmelCase_ = self.num_labels
        lowerCAmelCase_ = [1, 1, 1, 1]
        lowerCAmelCase_ = self.num_channels
        lowerCAmelCase_ = 64
        lowerCAmelCase_ = 128
        lowerCAmelCase_ = self.hidden_dim
        lowerCAmelCase_ = self.hidden_dim
        lowerCAmelCase_ = self.hidden_dim
        return config
    def __a ( self ) -> str:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
        lowerCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def __a ( self , _a , _a ) -> Optional[Any]:
        # one hidden state per backbone stage / decoder layer
        lowerCAmelCase_ = output.encoder_hidden_states
        lowerCAmelCase_ = output.pixel_decoder_hidden_states
        lowerCAmelCase_ = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_a ) , config.decoder_layers )
    def __a ( self , _a , _a , _a , _a=False ) -> int:
        with torch.no_grad():
            lowerCAmelCase_ = MaskaFormerModel(config=_a )
            model.to(_a )
            model.eval()
            lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
            lowerCAmelCase_ = model(_a , output_hidden_states=_a )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(_a , _a )
    def __a ( self , _a , _a , _a , _a , _a ) -> List[Any]:
        lowerCAmelCase_ = MaskaFormerForUniversalSegmentation(config=_a )
        model.to(_a )
        model.eval()
        def comm_check_on_output(_a ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            lowerCAmelCase_ = model(pixel_values=_a , pixel_mask=_a )
            lowerCAmelCase_ = model(_a )
        comm_check_on_output(_a )
        lowerCAmelCase_ = model(
            pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
        comm_check_on_output(_a )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
    """Common model tests for Mask2Former.

    NOTE(review): the base classes `__lowercase` are undefined (presumably
    ModelTesterMixin and PipelineTesterMixin), all class attributes share the
    name `lowerCamelCase__`, all methods share the name `__a`, and method
    bodies reference unresolved names (`_a`, `model`, `outputs`, ...) — the
    file was mechanically renamed; restore distinct names before use.
    """
    lowerCamelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    lowerCamelCase__ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    lowerCamelCase__ = False
    def __a ( self ) -> Tuple:
        # build the shared model tester and config tester
        lowerCAmelCase_ = MaskaFormerModelTester(self )
        lowerCAmelCase_ = ConfigTester(self , config_class=_a , has_text_modality=_a )
    def __a ( self ) -> List[str]:
        self.config_tester.run_common_tests()
    def __a ( self ) -> str:
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
    def __a ( self ) -> List[Any]:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
    @unittest.skip(reason="Mask2Former does not use inputs_embeds" )
    def __a ( self ) -> Any:
        pass
    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
    def __a ( self ) -> Optional[int]:
        pass
    @unittest.skip(reason="Mask2Former is not a generative model" )
    def __a ( self ) -> str:
        pass
    @unittest.skip(reason="Mask2Former does not use token embeddings" )
    def __a ( self ) -> List[Any]:
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def __a ( self ) -> Any:
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __a ( self ) -> List[str]:
        pass
    def __a ( self ) -> Any:
        # check forward() accepts pixel_values as its first argument
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ = model_class(_a )
            lowerCAmelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ = [*signature.parameters.keys()]
            lowerCAmelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _a )
    @slow
    def __a ( self ) -> int:
        # smoke-test loading the released checkpoint
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            lowerCAmelCase_ = MaskaFormerModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
    def __a ( self ) -> Optional[int]:
        # the segmentation head must produce a loss when labels are given
        lowerCAmelCase_ = (self.model_tester.min_size,) * 2
        lowerCAmelCase_ = {
            "pixel_values": torch.randn((2, 3, *size) , device=_a ),
            "mask_labels": torch.randn((2, 10, *size) , device=_a ),
            "class_labels": torch.zeros(2 , 10 , device=_a ).long(),
        }
        lowerCAmelCase_ = self.model_tester.get_config()
        lowerCAmelCase_ = MaskaFormerForUniversalSegmentation(_a ).to(_a )
        lowerCAmelCase_ = model(**_a )
        self.assertTrue(outputs.loss is not None )
    def __a ( self ) -> str:
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
    def __a ( self ) -> List[Any]:
        # attentions must be returned when requested
        lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ = model_class(_a ).to(_a )
            lowerCAmelCase_ = model(**_a , output_attentions=_a )
            self.assertTrue(outputs.attentions is not None )
    def __a ( self ) -> List[str]:
        # the loss must be backpropagatable in training mode
        if not self.model_tester.is_training:
            return
        lowerCAmelCase_ = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = model_class(_a )
        model.to(_a )
        model.train()
        lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a ).loss
        loss.backward()
    def __a ( self ) -> Union[str, Any]:
        # gradients must flow back to every intermediate hidden state / attention
        lowerCAmelCase_ = self.all_model_classes[1]
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = True
        lowerCAmelCase_ = True
        lowerCAmelCase_ = model_class(_a ).to(_a )
        model.train()
        lowerCAmelCase_ = model(_a , mask_labels=_a , class_labels=_a )
        lowerCAmelCase_ = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase_ = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_a )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def A():
    """Load the standard COCO cats sample image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_vision
@slow
class __magic_name__ (unittest.TestCase ):
    """Integration tests against the released Mask2Former Swin-small COCO checkpoint.

    NOTE(review): all methods share the name `__a` and bodies reference
    unresolved names (`_a`, `model`, `inputs`, `outputs`, ...) because every
    local was bound to `lowerCAmelCase_` — the file was mechanically renamed;
    restore distinct names before use.
    """
    @cached_property
    def __a ( self ) -> Dict:
        # checkpoint id shared by all tests below
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def __a ( self ) -> Optional[Any]:
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def __a ( self ) -> int:
        # headless model: compare encoder / pixel decoder / transformer decoder slices
        lowerCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
        lowerCAmelCase_ = self.default_image_processor
        lowerCAmelCase_ = prepare_img()
        lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
        lowerCAmelCase_ = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_a , (1, 3, 384, 384) )
        with torch.no_grad():
            lowerCAmelCase_ = model(**_a )
        lowerCAmelCase_ = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
        lowerCAmelCase_ = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
        lowerCAmelCase_ = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
    def __a ( self ) -> str:
        # segmentation head: compare mask and class logits slices
        lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
        lowerCAmelCase_ = self.default_image_processor
        lowerCAmelCase_ = prepare_img()
        lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a )
        lowerCAmelCase_ = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_a , (1, 3, 384, 384) )
        with torch.no_grad():
            lowerCAmelCase_ = model(**_a )
        # masks_queries_logits
        lowerCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        lowerCAmelCase_ = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        lowerCAmelCase_ = torch.tensor(_a ).to(_a )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
        # class_queries_logits
        lowerCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase_ = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(_a )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
    def __a ( self ) -> Tuple:
        # training-style forward with segmentation maps must produce a loss
        lowerCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
        lowerCAmelCase_ = self.default_image_processor
        lowerCAmelCase_ = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
        lowerCAmelCase_ = inputs["pixel_values"].to(_a )
        lowerCAmelCase_ = [el.to(_a ) for el in inputs["mask_labels"]]
        lowerCAmelCase_ = [el.to(_a ) for el in inputs["class_labels"]]
        with torch.no_grad():
            lowerCAmelCase_ = model(**_a )
        self.assertTrue(outputs.loss is not None )
| 122 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Bug fix: the import structure was bound to `__lowerCamelCase` while the
# _LazyModule call below referenced an undefined `_import_structure`, and the
# torch-only symbols were never registered; the lazy module is now installed
# into sys.modules as in the standard transformers __init__ pattern.
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}

# torch-only modeling objects are exposed only when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Defer heavy imports until an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Bug fix: the import structure was bound to `__lowerCamelCase` while the
# _LazyModule call below referenced an undefined `_import_structure`, and the
# framework-specific symbols were never registered; the lazy module is now
# installed into sys.modules as in the standard transformers __init__ pattern.
_import_structure = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vision_encoder_decoder"""] = ["""VisionEncoderDecoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_vision_encoder_decoder"""] = ["""TFVisionEncoderDecoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_vision_encoder_decoder"""] = ["""FlaxVisionEncoderDecoderModel"""]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Defer heavy imports until an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _snake_case:
def __init__(self : List[str] , a : Dict , a : List[Any]=13 , a : Union[str, Any]=32 , a : Any=2 , a : Optional[int]=3 , a : List[Any]=16 , a : str=[1, 2, 1] , a : Tuple=[2, 2, 4] , a : Optional[int]=2 , a : List[str]=2.0 , a : Optional[Any]=True , a : Union[str, Any]=0.0 , a : Optional[Any]=0.0 , a : Union[str, Any]=0.1 , a : List[str]="gelu" , a : Any=False , a : Any=True , a : Optional[Any]=0.02 , a : Dict=1e-5 , a : Any=True , a : Optional[int]=None , a : List[str]=True , a : Optional[Any]=10 , a : Any=8 , a : Dict=["stage1", "stage2", "stage3"] , a : Tuple=[1, 2, 3] , ) -> List[Any]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = depths
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = patch_norm
A__ = layer_norm_eps
A__ = initializer_range
A__ = is_training
A__ = scope
A__ = use_labels
A__ = type_sequence_label_size
A__ = encoder_stride
A__ = out_features
A__ = out_indices
def _UpperCamelCase (self : List[str] ) -> int:
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase (self : str ) -> Tuple:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCamelCase (self : int , a : Dict , a : Any , a : List[Any] ) -> List[str]:
"""simple docstring"""
A__ = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
A__ = model(a )
A__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCamelCase (self : Optional[int] , a : Optional[int] , a : List[Any] , a : Any ) -> str:
"""simple docstring"""
A__ = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
A__ = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(a ):
A__ = ['stem']
A__ = MaskFormerSwinBackbone(config=a )
def _UpperCamelCase (self : List[Any] ) -> Tuple:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__snake_case: Any = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__snake_case: Optional[Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
__snake_case: Optional[Any] = False
__snake_case: Union[str, Any] = False
__snake_case: int = False
__snake_case: int = False
__snake_case: Optional[Any] = False
def _UpperCamelCase (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = MaskFormerSwinModelTester(self )
A__ = ConfigTester(self , config_class=a , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCamelCase (self : Dict ) -> List[str]:
"""simple docstring"""
pass
def _UpperCamelCase (self : Tuple ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase (self : int ) -> Tuple:
"""simple docstring"""
return
def _UpperCamelCase (self : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase (self : Dict ) -> Optional[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCamelCase (self : str ) -> Any:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCamelCase (self : Dict ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCamelCase (self : Dict ) -> Any:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(a )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCamelCase (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCamelCase (self : str ) -> Tuple:
"""simple docstring"""
pass
def _UpperCamelCase (self : List[Any] , a : Union[str, Any] , a : int , a : Optional[Any] , a : int ) -> Any:
"""simple docstring"""
A__ = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(a , a ) )
A__ = outputs.hidden_states
A__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCamelCase (self : Tuple ) -> Optional[int]:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A__ = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCamelCase (self : Optional[int] ) -> List[Any]:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A__ = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCamelCase (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCamelCase (self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCamelCase (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a : List[str] ):
A__ = 0
return t
def check_equivalence(a : Optional[int] , a : Tuple , a : Tuple , a : List[str]={} ):
with torch.no_grad():
A__ = model(**a , return_dict=a , **a )
A__ = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a : Tuple , a : Any ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
A__ = model_class(a )
model.to(a )
model.eval()
A__ = self._prepare_for_class(a , a )
A__ = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
A__ = self._prepare_for_class(a , a , return_labels=a )
A__ = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
A__ = self._prepare_for_class(a , a )
A__ = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
A__ = self._prepare_for_class(a , a , return_labels=a )
A__ = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class _snake_case( unittest.TestCase , UpperCAmelCase ):
__snake_case: List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__snake_case: List[str] = MaskFormerSwinConfig
def _UpperCamelCase (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = MaskFormerSwinModelTester(self )
def _UpperCamelCase (self : Tuple ) -> str:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
A__ = backbone_class(a )
backbone.to(a )
backbone.eval()
A__ = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
A__ = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
A__ , A__ , A__ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
A__ = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 531 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__snake_case: Union[str, Any] = StableUnCLIPPipeline
__snake_case: List[str] = TEXT_TO_IMAGE_PARAMS
__snake_case: str = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case: List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__snake_case: str = False
def _UpperCamelCase (self : Optional[int] ) -> int:
"""simple docstring"""
A__ = 32
A__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=a , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
A__ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a , num_layers=1 , )
torch.manual_seed(0 )
A__ = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=a , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
A__ = StableUnCLIPImageNormalizer(embedding_dim=a )
A__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a , layers_per_block=1 , upcast_attention=a , use_linear_projection=a , )
torch.manual_seed(0 )
A__ = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=a , steps_offset=1 , )
torch.manual_seed(0 )
A__ = AutoencoderKL()
A__ = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def _UpperCamelCase (self : List[Any] , a : Optional[Any] , a : str=0 ) -> Dict:
"""simple docstring"""
if str(a ).startswith('mps' ):
A__ = torch.manual_seed(a )
else:
A__ = torch.Generator(device=a ).manual_seed(a )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase (self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=a )
def _UpperCamelCase (self : Dict ) -> str:
"""simple docstring"""
A__ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=a )
@slow
@require_torch_gpu
class _snake_case( unittest.TestCase ):
def _UpperCamelCase (self : Any ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = pipe('anime turle' , generator=a , output_type='np' )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(a , a )
def _UpperCamelCase (self : Optional[Any] ) -> str:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
A__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 531 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SCREAMING_SNAKE_CASE (_UpperCAmelCase ):
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[Any] = None
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , 'feature_size' ) )
self.assertTrue(hasattr(A_ , 'sampling_rate' ) )
self.assertTrue(hasattr(A_ , 'padding_value' ) )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
def _inputs_have_equal_length(a : int ):
lowercase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(a : int , a : Optional[int] ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(A_ , padding=A_ )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(A_ , padding='longest' )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' )[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , return_tensors='np' )
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(A_ , pad_to_multiple_of=10 )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(A_ , padding='longest' , pad_to_multiple_of=10 )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Optional[int]=False )-> Dict:
"""simple docstring"""
def _inputs_have_equal_length(a : Any ):
lowercase__ = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(a : str , a : str ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=A_ )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=A_ , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
self._check_padding(numpify=A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict:
"""simple docstring"""
self._check_truncation(numpify=A_ )
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
self._check_truncation(numpify=A_ )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**A_ )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(A_ ) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**A_ )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(A_ ) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = min(A_ )
lowercase__ = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 704 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7) -> List[Any]:
    """Return the `num_runs` most recent scheduled daily-CI workflow runs on `main`.

    Args:
        token: GitHub API token, or None for unauthenticated requests.
        num_runs: maximum number of runs to fetch (GitHub `per_page`).

    Fixes: the original declared duplicate `_SCREAMING_SNAKE_CASE` parameters
    (a SyntaxError) while the body read the never-bound names `token` and
    `num_runs`; all assignment targets were collapsed to `lowercase__` while
    later lines read `headers`, `workflow_id` and `url`. The function is named
    `get_daily_ci_runs` because that is the name its in-file caller uses.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token) -> Union[str, Any]:
    """Return the id of the most recent *completed* daily-CI workflow run,
    or None when no completed run is found.

    Fixes: the parameter was the opaque `_SCREAMING_SNAKE_CASE` while the
    obfuscation also collapsed the assignment targets to `lowercase__`,
    leaving `workflow_run_id` unbound at the `return`. The function is named
    `get_last_daily_ci_runs` because that is the name its in-file callers use.
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned newest-first; take the first finished one.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token) -> Tuple:
    """Download the named artifacts of the latest completed daily-CI run
    into `output_dir` (as `<name>.zip` files, via `download_artifact`).

    Fixes: the original declared three identically-named
    `_SCREAMING_SNAKE_CASE` parameters (a SyntaxError) while the body read
    `artifact_names`, `output_dir` and `token`; assignment targets were
    collapsed to `lowercase__` although `artifacts_links`/`artifact_url` are
    read below. Named `get_last_daily_ci_artifacts` to match its in-file
    caller.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) is kept — it must match the
        # keyword expected by `get_artifacts_links`; confirm against that helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def __UpperCamelCase (artifact_names, output_dir, token) -> Dict:
    """Download the latest daily-CI artifacts, then read every file inside each
    downloaded `<artifact_name>.zip` and return
    `{artifact_name: {member_filename: decoded_text}}`.

    Fixes: the original declared three identically-named
    `_SCREAMING_SNAKE_CASE` parameters (a SyntaxError) while the body needed
    distinct names; assignment targets were collapsed to `lowercase__`,
    leaving `results` unbound at the `return`. The public name is kept as the
    module's last `__UpperCamelCase` binding since no in-file caller pins a
    different one.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    # Skip directory entries; only read regular members.
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
| 45 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.