"""Tests for the LED tokenizer (slow and fast implementations)."""
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case : Dict = 4
snake_case : str = (1 << p) - 1
for _ in range(p - 2 ):
snake_case : Any = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
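
# Quick sanity check (a minimal sketch): 3, 5, 7 and 13 are known Mersenne-prime
# exponents, while 2**11 - 1 = 2047 = 23 * 89 is composite.
if __name__ == "__main__":
    assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13))
    assert not lucas_lehmer_test(11)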
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
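
# Usage sketch (a minimal illustration with a hypothetical prompt string): the
# helpers above pair an interactive prompt with a converter, e.g. for the yes/no
# questions asked by `accelerate config`.
#
#     use_cpu = _ask_field(
#         "Do you want to run on CPU only? [yes/NO]: ",
#         convert_value=_convert_yes_no_to_bool,
#         default=False,
#         error_message="Please enter yes or no.",
#     )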
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
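
# Import behaviour sketch (illustrative): because the module installs a _LazyModule
# into sys.modules, the heavy submodules only load when an attribute is first touched.
#
#     from transformers import GPTNeoXConfig        # light import, config only
#     from transformers import GPTNeoXForCausalLM   # first touch pulls in the torch-backed module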
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = KandinskyVaaImgaImgPipeline
UpperCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCamelCase = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCamelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase = False
@property
def snake_case ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return 3_2
@property
def snake_case ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return 3_2
@property
def snake_case ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return 1_0_0
@property
def snake_case ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : List[str] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_UpperCamelCase : Dict = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def snake_case ( self : int ) -> int:
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.dummy_unet
_UpperCamelCase : List[str] = self.dummy_movq
_UpperCamelCase : List[str] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_UpperCamelCase : Tuple = DDIMScheduler(**lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case ( self : List[Any], lowerCAmelCase__ : Optional[Any], lowerCAmelCase__ : int=0 ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCamelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
_UpperCamelCase : int = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCamelCase : str = image.cpu().permute(0, 2, 3, 1 )[0]
_UpperCamelCase : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
_UpperCamelCase : List[str] = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCamelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase : Tuple = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def snake_case ( self : Optional[int] ) -> int:
'''simple docstring'''
_UpperCamelCase : Dict = '''cpu'''
_UpperCamelCase : int = self.get_dummy_components()
_UpperCamelCase : List[Any] = self.pipeline_class(**lowerCAmelCase__ )
_UpperCamelCase : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : str = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCamelCase : Any = output.images
_UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ), return_dict=lowerCAmelCase__, )[0]
_UpperCamelCase : int = image[0, -3:, -3:, -1]
_UpperCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCamelCase : List[Any] = np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def snake_case ( self : int ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
_UpperCamelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_UpperCamelCase : Tuple = '''A red cartoon frog, 4k'''
_UpperCamelCase : Optional[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
_UpperCamelCase : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.floataa )
_UpperCamelCase : Tuple = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase , _UpperCamelCase : List[str] = pipe_prior(
lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
_UpperCamelCase : Union[str, Any] = pipeline(
image=lowerCAmelCase__, image_embeds=lowerCAmelCase__, negative_image_embeds=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, strength=0.2, output_type='''np''', )
_UpperCamelCase : List[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__, lowerCAmelCase__ )
"""simple docstring"""
def a_ ( _lowercase = 100 ):
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Optional[Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
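
# Usage sketch (illustrative): the defaults reproduce a BiT-50-style backbone, and
# `out_features` picks which of the derived stage names a downstream head consumes.
#
#     config = BitConfig(out_features=["stage1", "stage4"])
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']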
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = 'Hello, World!'
_a = 'en_XX'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = Path("data_bin" )
UpperCAmelCase_ : str = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(A__ ).parent ), checkpoint_file=Path(A__ ).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(A__ ), bpe="sentencepiece", sentencepiece_model=str(Path(A__ ).parent / "sentencepiece.bpe.model" ), src_dict=str(data_dir / "dict.txt" ), )
xmod.eval() # disable dropout
print(A__ )
UpperCAmelCase_ : int = xmod.model.encoder.sentence_encoder
UpperCAmelCase_ : List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
if classification_head:
UpperCAmelCase_ : Optional[int] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:", A__ )
UpperCAmelCase_ : Optional[Any] = XmodForSequenceClassification(A__ ) if classification_head else XmodForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ : int = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase_ : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase_ : List[Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ : int = model.roberta.encoder.layer[i]
UpperCAmelCase_ : str = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase_ : Optional[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
UpperCAmelCase_ : Tuple = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase_ : Dict = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase_ : Dict = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase_ : List[Any] = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase_ : str = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase_ : int = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ : Tuple = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
UpperCAmelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase_ : Dict = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase_ : List[str] = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase_ : Optional[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase_ : List[str] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
UpperCAmelCase_ : List[str] = xmod_layer.fca.weight
UpperCAmelCase_ : int = xmod_layer.fca.bias
# output
UpperCAmelCase_ : Tuple = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
UpperCAmelCase_ : int = xmod_layer.fca.weight
UpperCAmelCase_ : Union[str, Any] = xmod_layer.fca.bias
UpperCAmelCase_ : List[Any] = xmod_layer.final_layer_norm.weight
UpperCAmelCase_ : str = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase_ : Dict = bert_output.adapter_modules[lang_code]
UpperCAmelCase_ : Dict = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase_ : int = from_adapter.fca.weight
UpperCAmelCase_ : Optional[Any] = from_adapter.fca.bias
UpperCAmelCase_ : Any = from_adapter.fca.weight
UpperCAmelCase_ : Tuple = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase_ : Union[str, Any] = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase_ : str = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase_ : List[Any] = xmod.model.classification_heads["mnli"].dense.weight
UpperCAmelCase_ : List[Any] = xmod.model.classification_heads["mnli"].dense.bias
UpperCAmelCase_ : Any = xmod.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase_ : int = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase_ : Dict = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase_ : Tuple = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase_ : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ : str = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ : Tuple = xmod.model.encoder.lm_head.weight
UpperCAmelCase_ : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase_ : List[str] = xmod.encode(A__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(A__ )
UpperCAmelCase_ : str = model(A__ )[0]
if classification_head:
UpperCAmelCase_ : str = xmod.model.classification_heads["mnli"](xmod.extract_features(A__ ) )
else:
UpperCAmelCase_ : str = xmod.model(A__, lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape, their_output.shape )
UpperCAmelCase_ : int = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
UpperCAmelCase_ : int = torch.allclose(A__, A__, atol=1E-3 )
print("Do both models output the same tensors?", "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(A__ ).mkdir(parents=A__, exist_ok=A__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_a = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
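
# CLI sketch (illustrative paths; the script filename below is an assumption based
# on the transformers naming convention for conversion scripts):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/model.pt \
#         --pytorch_dump_folder_path /path/to/out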
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Return True if there is node that has not iterated.
UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase )
UpperCAmelCase_ : Any = []
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Tuple = True
while queue:
UpperCAmelCase_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = u
return visited[t]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# This array is filled by BFS and to store path
UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase ))
UpperCAmelCase_ : Any = 0
while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = float("Inf" )
UpperCAmelCase_ : Tuple = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] )
UpperCAmelCase_ : Dict = parent[s]
max_flow += path_flow
UpperCAmelCase_ : Optional[Any] = sink
while v != source:
UpperCAmelCase_ : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ : Optional[int] = parent[v]
return max_flow
_a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a = 0, 5
print(ford_fulkerson(graph, source, sink))
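
# Sanity note (illustrative): the 6-node network above is the classic CLRS example,
# whose maximum flow is 23. Because the solver mutates `graph` in place (it becomes
# the residual network), pass a copy if the capacities are needed afterwards:
#
#     import copy
#     residual = copy.deepcopy(graph)
#     flow = ford_fulkerson(residual, 0, 5)  # `graph` itself stays untouched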
import numpy as np
import torch

from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
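
# Usage sketch (illustrative): the encoder expects a batch of images in [-1, 1]
# with shape (batch, channels, height, width); images narrower than 256 pixels
# pass through unchanged.
#
#     watermarker = StableDiffusionXLWatermarker()
#     images = torch.rand(1, 3, 512, 512) * 2 - 1
#     watermarked = watermarker.apply_watermark(images)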
"""Project Euler problem 82: minimal path sum in a matrix, moving up, down, and
right, entering from any cell in the left column and exiting from any cell in
the right column."""
import os


def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""Tests for the CPM-Ant tokenizer."""
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Solve the N-queens problem with backtracking, printing every solution."""
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if it is safe to place a queen at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        # store a copy, since the board keeps being mutated by backtracking
        solution.append([board_row[:] for board_row in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
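
# Note on the pruning above (a brief clarification): `is_safe` only scans the row,
# the column, and the two *upper* diagonals, because `solve` fills the board from the
# top row down, so no queen can exist below the current row yet. For n = 8 the final
# count printed is 92, the known number of 8-queens solutions.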
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
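
# Usage sketch (illustrative): the class is normally reached through the `pipeline`
# factory rather than instantiated directly.
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker("Paris is the <mask> of France.", top_k=2)
#     # -> [{"score": ..., "token_str": " capital", "sequence": "Paris is the capital of France."}, ...]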
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
_UpperCAmelCase : List[str] = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase : Optional[int] = dict(scheduler.config )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Dict = FrozenDict(__UpperCamelCase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
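# Minimal sketch (not part of the pipeline above) of its mask-generation step:
# CLIPSeg logits are squashed with a sigmoid into a soft [0, 1] mask, which the
# inpainting pipeline then consumes. Pure-numpy stand-in for illustration.
def _sigmoid_mask_sketch(logits):
    import numpy as np
    # numpy equivalent of torch.sigmoid(outputs.logits)
    return 1.0 / (1.0 + np.exp(-np.asarray(logits , dtype=np.float64 ) ) )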
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 40_96,
"allenai/longformer-large-4096": 40_96,
"allenai/longformer-large-4096-finetuned-triviaqa": 40_96,
"allenai/longformer-base-4096-extra.pos.embd.only": 40_96,
"allenai/longformer-large-4096-extra.pos.embd.only": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
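# Quick illustration (not in the original file): bytes_to_unicode maps every
# byte to a printable unicode character; printable bytes map to themselves and
# the remaining bytes are shifted up past 2**8.
def _bytes_to_unicode_demo():
    mapping = bytes_to_unicode()
    assert len(mapping ) == 256
    assert mapping[ord("A" )] == "A"  # printable bytes are unchanged
    assert mapping[0] == chr(2**8 )  # byte 0 is remapped past the byte range
    return mapping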
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
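# Tiny illustration (not in the original file): get_pairs returns the set of
# adjacent symbol pairs that drives the BPE merge loop below.
def _get_pairs_demo():
    pairs = get_pairs(("l", "o", "w") )
    assert pairs == {("l", "o"), ("o", "w")}
    return pairs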
class LongformerTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
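# Hedged sketch (not part of the tokenizer): one hand-run of the BPE loop in
# bpe() above. With ranks {("l", "o"): 0, ("lo", "w"): 1}, the word
# ("l", "o", "w") first merges the best-ranked pair into "lo", then "low".
def _bpe_merge_sketch():
    word = ("l", "o", "w")
    ranks = {("l", "o"): 0, ("lo", "w"): 1}
    while True:
        pairs = get_pairs(word )
        best = min(pairs , key=lambda pair : ranks.get(pair , float("inf" ) ) )
        if best not in ranks:
            break
        first , second = best
        merged = []
        i = 0
        while i < len(word ):
            if i < len(word ) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second )
                i += 2
            else:
                merged.append(word[i] )
                i += 1
        word = tuple(merged )
        if len(word ) == 1:
            break
    return word  # ("low",)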
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata)
@dataclass
class BasicExample:
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    '''simple docstring'''
    foo: int = 42
    baz: str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum ):
    '''simple docstring'''
    titi = 'titi'
    toto = 'toto'
class MixedTypeEnum(Enum ):
    '''simple docstring'''
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42
@dataclass
class EnumExample:
    '''simple docstring'''
    foo: BasicEnum = "toto"
    def __post_init__(self ):
        """simple docstring"""
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    '''simple docstring'''
    foo: MixedTypeEnum = "toto"
    def __post_init__(self ):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample:
    '''simple docstring'''
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample:
    '''simple docstring'''
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__(self ):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    '''simple docstring'''
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto' , metadata={'help': 'help message'} )
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        '''simple docstring'''
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        '''simple docstring'''
        foo: int | None = None
        bar: float | None = field(default=None , metadata={'help': 'help message'} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest(unittest.TestCase ):
    '''simple docstring'''
    def argparsersEqual(self , a: argparse.ArgumentParser , b: argparse.ArgumentParser ):
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
    def test_basic(self ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument('''--bar''' , type=float , required=True )
        expected.add_argument('''--baz''' , type=str , required=True )
        expected.add_argument('''--flag''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        self.argparsersEqual(parser , expected )
        args = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        (example , ) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def test_with_default(self ):
        """simple docstring"""
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=int )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        self.argparsersEqual(parser , expected )
    def test_with_default_bool(self ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=string_to_bool , default=False , const=True , nargs='''?''' )
        expected.add_argument('''--baz''' , type=string_to_bool , default=True , const=True , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=False , dest='''baz''' )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
    def test_with_enum(self ):
        """simple docstring"""
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal(self ):
        """simple docstring"""
        @dataclass
        class WithLiteralExample:
            '''simple docstring'''
            foo: Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(WithLiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        args = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        args = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
    def test_with_list(self ):
        """simple docstring"""
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=int )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=int )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def test_with_optional(self ):
        """simple docstring"""
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=None , type=int )
        expected.add_argument('''--bar''' , default=None , type=float , help='''help message''' )
        expected.add_argument('''--baz''' , default=None , type=str )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=str )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def test_with_required(self ):
        """simple docstring"""
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=int , required=True )
        expected.add_argument('''--required_str''' , type=str , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        self.argparsersEqual(parser , expected )
    def test_with_string_literal_annotation(self ):
        """simple docstring"""
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=int , required=True )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=True , )
        expected.add_argument('''--opt''' , type=string_to_bool , default=None )
        expected.add_argument('''--baz''' , default='''toto''' , type=str , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=str )
        self.argparsersEqual(parser , expected )
    def test_parse_dict(self ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        example = BasicExample(**args_dict )
        self.assertEqual(parsed_args , example )
    def test_parse_dict_extra_key(self ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
    def test_parse_json(self ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_json''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(args_dict_for_json , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
            example = BasicExample(**args_dict_for_json )
            self.assertEqual(parsed_args , example )
    def test_parse_yaml(self ):
        """simple docstring"""
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , '''temp_yaml''' )
            os.mkdir(temp_local_path )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            example = BasicExample(**args_dict_for_yaml )
            self.assertEqual(parsed_args , example )
    def test_integration_training_args(self ):
        """simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
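# Hedged usage sketch (not part of the test suite): HfArgumentParser turns a
# dataclass into an argparse-style CLI, one --flag per field, which is exactly
# what the tests above verify.
if __name__ == "__main__":
    _sketch_parser = HfArgumentParser(BasicExample )
    (_sketch_example , ) = _sketch_parser.parse_args_into_dataclasses(
        ['''--foo''', '''1''', '''--bar''', '''0.5''', '''--baz''', '''quux''', '''--flag''', '''False'''] )
    print(_sketch_example )  # BasicExample(foo=1, bar=0.5, baz='quux', flag=False)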
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_value , padding_side , sequence_length):
    """simple docstring"""
    if isinstance(padding_value , tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2) , padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length) , padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value , tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
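# Quick illustration (not in the original file): sequences shorter than
# `sequence_length` are filled with the padding value on the requested side.
def _padding_tensor_demo():
    padded = padding_tensor([[1, 2], [3]] , -1 , "right" , 4)
    assert padded == [[1, 2, -1, -1], [3, -1, -1, -1]]
    return padded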
def is_punctuation(char):
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('''P'''):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin ):
    '''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self , features ):
        """simple docstring"""
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
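# Hedged sketch (not part of the collator above) of its label-padding rule on
# plain lists: right padding appends the pad id, left padding prepends it.
def _pad_labels_sketch(labels , sequence_length , pad_id=-100 , padding_side="right"):
    if padding_side == "right":
        return [list(label ) + [pad_id] * (sequence_length - len(label )) for label in labels]
    return [[pad_id] * (sequence_length - len(label )) + list(label ) for label in labels]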
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
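# Worked example (not in the original file): for [1, 2] the recursion first
# explores "skip the element", then "take it", printing [], [2], [1], [1, 2].
def _subsequence_demo():
    generate_all_subsequences([1, 2] )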
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
|
"""simple docstring"""
def merge_sort(collection: list ) -> list:
    def merge(left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
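# Quick illustration (not in the original file): the list is split in half,
# each half is sorted recursively, and the sorted halves are merged.
def _merge_sort_demo():
    assert merge_sort([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]
    assert merge_sort([] ) == []
    return merge_sort([2, 1] )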
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model , model_file ):
    """simple docstring"""
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    """simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
lowerCAmelCase__ : Any = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
                missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
return pt_model
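# Hedged sketch (not part of this module) of the key/value translation above:
# a 4D Flax "kernel" becomes a PyTorch "weight" in HWIO -> OIHW layout, a 2D
# kernel is transposed, and a "scale" is renamed without reshaping.
def _flax_key_to_pt_sketch(key_tuple , tensor):
    tensor = np.asarray(tensor )
    if key_tuple[-1] == "kernel" and tensor.ndim == 4:
        return key_tuple[:-1] + ("weight",), np.transpose(tensor , (3, 2, 0, 1) )
    if key_tuple[-1] == "kernel":
        return key_tuple[:-1] + ("weight",), tensor.T
    if key_tuple[-1] == "scale":
        return key_tuple[:-1] + ("weight",), tensor
    return key_tuple, tensor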
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text(text: str ) -> tuple[Counter, Counter]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
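# Worked example (not in the original file): the quantity printed above is the
# Shannon entropy H = -sum(p * log2(p)). For the text "ab", each of "a" and "b"
# has probability 1/2, so the first-order entropy is exactly 1.0 bit.
def _entropy_demo():
    single_char_strings , two_char_strings = analyze_text("ab" )
    assert single_char_strings["a"] == 1 and single_char_strings["b"] == 1
    return single_char_strings, two_char_strings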
def main():
    """simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
|
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ) -> str:
    """simple docstring"""
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
lowerCAmelCase__ : Optional[Any] = f"""{src_lang}-{tgt_lang}"""
lowerCAmelCase__ : Tuple = f"""\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"""
    readme = lowerCAmelCase__  # README text assembled in the f-string above (variable name kept from the source)
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(f"""Generating {path}""" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
|
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        pass
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pass
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        pass
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-5 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ):
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    def test_vision_text_dual_encoder_model( self ):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs )
    def test_model_from_pretrained_configs( self ):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs )
    def test_save_load( self ):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs )
    def test_vision_text_output_attention( self ):
        """simple docstring"""
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs )
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        """simple docstring"""
        model_2 , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _lowerCamelCase ( a_ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1E-3))
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __lowercase :
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = FalconModel(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )
__lowerCAmelCase : List[str] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = FalconModel(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : str = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
__lowerCAmelCase : Tuple = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
__lowerCAmelCase : int = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[str] = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Any = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Any = True
__lowerCAmelCase : Any = FalconForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
__lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
__lowerCAmelCase : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCAmelCase : Optional[int] = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
__lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0]
# select random slice
__lowerCAmelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else ()
_UpperCamelCase = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = FalconModelTester(self )
__lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
    def test_falcon_alibi(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Union[str, Any] = input_dict['''input_ids''']
__lowerCAmelCase : str = input_ids.ne(1 ).to(A_ )
__lowerCAmelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase : Tuple = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Union[str, Any] = 3
__lowerCAmelCase : Optional[Any] = '''single_label_classification'''
__lowerCAmelCase : Optional[int] = input_dict['''input_ids''']
__lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A_ )
__lowerCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase : int = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = input_dict['''input_ids''']
__lowerCAmelCase : List[str] = FalconForCausalLM(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Tuple = model(A_ , use_cache=A_ )
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Optional[int] = model._convert_to_rw_cache(result.past_key_values )
__lowerCAmelCase : Optional[Any] = model._convert_cache_to_standard_format(A_ , A_ )
for layer in range(len(A_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : str = 3
__lowerCAmelCase : Union[str, Any] = '''multi_label_classification'''
__lowerCAmelCase : List[Any] = input_dict['''input_ids''']
__lowerCAmelCase : Tuple = input_ids.ne(1 ).to(A_ )
__lowerCAmelCase : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase : Optional[int] = FalconForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Dict = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
for model_class in self.all_generative_model_classes:
__lowerCAmelCase, __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(A_ , '''use_cache''' ):
return
__lowerCAmelCase : Optional[int] = model_class(A_ ).to(A_ )
if "use_cache" not in inputs:
__lowerCAmelCase : Union[str, Any] = True
__lowerCAmelCase : Tuple = model(**A_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__lowerCAmelCase : Union[str, Any] = (
getattr(A_ , '''decoder_layers''' , A_ )
or getattr(A_ , '''num_decoder_layers''' , A_ )
or config.num_hidden_layers
)
__lowerCAmelCase : List[str] = getattr(A_ , '''num_kv_heads''' , config.num_attention_heads )
__lowerCAmelCase : Optional[int] = getattr(A_ , '''d_model''' , config.hidden_size )
__lowerCAmelCase : List[str] = embed_dim // num_attention_heads
__lowerCAmelCase : List[Any] = outputs['''past_key_values''']
self.assertEqual(len(A_ ) , A_ )
__lowerCAmelCase, __lowerCAmelCase : List[str] = inputs['''input_ids'''].shape
for i in range(A_ ):
if config.new_decoder_architecture:
__lowerCAmelCase : List[Any] = config.num_attention_heads
elif config.multi_query:
__lowerCAmelCase : int = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __lowercase (unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : str = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
__lowerCAmelCase : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(A_ )
__lowerCAmelCase : Tuple = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
__lowerCAmelCase : Union[str, Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
__lowerCAmelCase : Tuple = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 )
__lowerCAmelCase : Optional[int] = tokenizer.batch_decode(A_ )[0]
self.assertEqual(A_ , A_ )
@slow
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ )
__lowerCAmelCase : Optional[Any] = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(A_ )
__lowerCAmelCase : Any = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , do_sample=A_ , max_new_tokens=4 )
model.generate(**A_ , num_beams=2 , max_new_tokens=4 )
@slow
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__lowerCAmelCase : Any = AutoTokenizer.from_pretrained(A_ )
__lowerCAmelCase : Tuple = FalconForCausalLM.from_pretrained(A_ )
model.eval()
model.to(device=A_ )
__lowerCAmelCase : Dict = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ )
# Test results are the same with and without cache
__lowerCAmelCase : Dict = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
__lowerCAmelCase : List[str] = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase (_UpperCAmelCase ):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0]
__lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0]
__lowerCAmelCase : List[str] = model(A_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = self.num_labels
__lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : int = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : int = DebertaModelTester(self )
__lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A_ )
@slow
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
pass
@slow
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0]
# compare the actual values for a slice.
__lowerCAmelCase : Optional[Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down) 0/1 knapsack: only the subproblems that are
    actually needed get solved, using the global table ``f`` as a memo."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solves the integer-weights knapsack problem and also returns one of the
    possible optimal subsets, as a set of 1-based item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
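# Illustrative shape check (not part of the original module): both inputs are
# L2-normalised before the matmul, so for embeddings of shape (B, D) and
# (K, D) the result is a (B, K) matrix of cosine similarities.
#
#     a = torch.randn(2, 8)
#     b = torch.randn(3, 8)
#     assert cosine_distance(a, b).shape == (2, 3)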
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
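# Usage sketch (hypothetical and illustrative only; the checkpoint name and the
# processor setup are assumptions, not taken from this file):
#
#     from transformers import CLIPImageProcessor
#     checker = StableDiffusionSafetyChecker.from_pretrained(
#         "CompVis/stable-diffusion-safety-checker")
#     processor = CLIPImageProcessor()
#     clip_input = processor(images=image, return_tensors="pt").pixel_values
#     images, has_nsfw = checker(clip_input, images=[image])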
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct by bumping duplicate weights up."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Boruvka's algorithm: while more than one component remains, every
        component adds its cheapest outgoing edge to the spanning forest."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
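# Usage sketch (illustrative, not part of the original file): with distinct
# edge weights, Boruvka on the small graph below returns the MST of weight
# 1 + 2 + 3 = 6.
#
#     g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (0, 3, 4)])
#     mst = Graph.boruvka_mst(g)
#     print(mst)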
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__snake_case , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> str:
'''simple docstring'''
__a =ConvNextModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =ConvNextForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> int:
'''simple docstring'''
__a =ConvNextBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a =None
__a =ConvNextBackbone(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =ConvNextModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a , __a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a =model_class(__snake_case )
__a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a =[*signature.parameters.keys()]
__a =['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Run `steps` iterations of the Koch construction on the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments: the middle third is removed and
    rebuilt as a 'bump' made from the middle third rotated by 60 degrees."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle (in degrees)."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
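# A minimal doctest-style sanity check for rotate(), added for illustration:
# rotating the unit x-vector by 90 degrees yields the unit y-vector up to
# floating-point error.
#
#     >>> numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))
#     True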
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the polyline described by `vectors` with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-to-speech')
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text='hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=("train",) ) -> Union[str, Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
UpperCAmelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
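# A minimal round-trip sketch of what the writer tests below exercise; the
# `Dataset.from_dict` payload here is invented for illustration and is not one
# of the test fixtures:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     with io.BytesIO() as buf:
#         JsonDatasetWriter(ds, buf, lines=True).write()
#         buf.seek(0)
#         rows = load_json_lines(buf)  # -> [{"col_1": "a", "col_2": 1}, ...]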
class TestJsonDatasetWriter:
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
from pathlib import Path
import fire
from tqdm import tqdm
def A_ ( _lowerCAmelCase="ro" , _lowerCAmelCase="en" , _lowerCAmelCase="wmt16" , _lowerCAmelCase=None ) -> List[str]:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
UpperCamelCase : int = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
UpperCamelCase : Optional[int] = datasets.load_dataset(_a , _a )
if save_dir is None:
UpperCamelCase : List[str] = F"""{dataset}-{pair}"""
UpperCamelCase : int = Path(_a )
save_dir.mkdir(exist_ok=_a )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
UpperCamelCase : Tuple = """val""" if split == """validation""" else split
UpperCamelCase : str = save_dir.joinpath(F"""{fn}.source""" )
UpperCamelCase : Optional[int] = save_dir.joinpath(F"""{fn}.target""" )
UpperCamelCase : int = src_path.open("w+" )
UpperCamelCase : Any = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase : Dict = x["""translation"""]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : List[str] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__lowerCamelCase : Optional[int] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
__lowerCAmelCase = self._tf_checkpoint
__lowerCAmelCase = """"""
else:
__lowerCAmelCase = self._tf_checkpoint
__lowerCAmelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE__ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE__ )
elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
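# Worked example: 25 = 0b11001 and 32 = 0b100000, so every position of the
# bitwise AND is zero and the result is zero-padded to the longer operand:
#
#     >>> binary_and(25, 32)
#     '0b000000'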
if __name__ == "__main__":
import doctest
doctest.testmod()
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all contiguous n-grams of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
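# Illustrative call, producing character-level trigrams:
#
#     >>> create_ngram("I am a sentence", 3)[:3]
#     ['I a', ' am', 'am ']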
if __name__ == "__main__":
from doctest import testmod
testmod()
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
    'wmt19-en-ru': {'length_penalty': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
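# Illustrative behaviour of rewrite_dict_keys (values invented for the example):
# fairseq BPE marks word-internal pieces with a trailing "@@", while the FSMT
# tokenizer marks word-final pieces with "</w>", so
#     {"hell@@": 10, "o": 11}  ->  {"hell": 10, "o</w>": 11}
# with the special tokens <s> <pad> </s> <unk> kept without the "</w>" suffix.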
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
lowercase : Optional[int] = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
        'init_std': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
# tokenizer config
lowercase : Any = os.path.join(_UpperCamelCase, _UpperCamelCase )
lowercase : Tuple = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1024,
'''do_lower_case''': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
# model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
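# Worked example: 3^5 mod 7. Since 3^5 = 243 = 34 * 7 + 5,
# binary_exponentiation(3, 5, 7) == 5, computed via the halving recursion
# 3^5 -> 3 * (3^2)^2.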
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
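# Why the two checks work: for a prime p and b not divisible by p, Fermat's
# little theorem gives b^(p-1) == 1 (mod p), so b^(p-2) is the modular inverse
# of b, and (a / b) mod p can be computed as (a * b^(p-2)) mod p.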
'''simple docstring'''
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
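# Doctest-style examples:
#
#     >>> hamming_distance("python", "python")
#     0
#     >>> hamming_distance("karolin", "kathrin")
#     3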
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
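# The recurrence gcd(a, b) = gcd(b, a mod b) terminates because the second
# argument strictly decreases, e.g. gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6)
# -> gcd(6, 0) = 6.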
def main():
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
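# Minimal usage sketch (slowsort mutates the list in place):
#
#     >>> seq = [5, 2, 4, 1, 3]
#     >>> slowsort(seq)
#     >>> seq
#     [1, 2, 3, 4, 5]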
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__A : Union[str, Any] = "\\n\n"
__A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a column vector."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class covariance (scatter) matrix, normalized by N."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class covariance (scatter) matrix, normalized by N."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
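# In scatter-matrix notation (illustrative): the two helpers above compute
#     S_W = (1/N) * sum_i sum_{x in class i} (x - mu_i)(x - mu_i)^T
#     S_B = (1/N) * sum_i N_i * (mu_i - mu)(mu_i - mu)^T
# and linear_discriminant_analysis below solves the generalized eigenproblem
# S_B v = lambda * S_W v.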
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the
        # first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes'
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> int:
_snake_case = []
for i in range(len(__A ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Optional[int]:
_snake_case = sorted(__A , key=__A , reverse=__A )
_snake_case = []
_snake_case , _snake_case = 0.0, 0.0
for i in range(len(__A ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
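# A hypothetical greedy-knapsack walkthrough (the menu data is illustrative, and
# the accessor names `get_value`/`get_weight` are the ones `greedy` references
# above): items are sorted by the key function in descending order, then taken
# while they still fit under `max_cost`.
#
#   >>> foods = build_menu(["Pizza", "Burger", "Coca Cola"], [100, 80, 30], [60, 40, 10])
#   >>> chosen, total_value = greedy(foods, 60, Things.get_value)
#   >>> total_value   # only Pizza (weight 60) fits under the budget of 60
#   100.0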
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_falcon'''] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=32 , __magic_name__=True , ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Union[str, Any] = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : int = min_resolution
snake_case_ : Any = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : str = size_divisor
snake_case_ : Optional[Any] = do_rescale
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = GLPNImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = GLPNImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size_divisor''' ) )
self.assertTrue(hasattr(__magic_name__ , '''resample''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_rescale''' ) )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 279
| 1
|
import cva
import numpy as np
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE = k
SCREAMING_SNAKE_CASE = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
'''simple docstring'''
return str(self.k )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ) -> tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = cva.imread(lowerCamelCase__ ,0 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = img.shape
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = img.copy()
SCREAMING_SNAKE_CASE = cva.cvtColor(lowerCamelCase__ ,cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = np.gradient(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = dx**2
SCREAMING_SNAKE_CASE = dy**2
SCREAMING_SNAKE_CASE = dx * dy
        SCREAMING_SNAKE_CASE = self.k  # use the k validated in the constructor rather than a hard-coded 0.04
SCREAMING_SNAKE_CASE = self.window_size // 2
        for y in range(offset ,h - offset ):
            for x in range(offset ,w - offset ):
SCREAMING_SNAKE_CASE = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE = wxx + wyy
SCREAMING_SNAKE_CASE = det - k * (trace**2)
                # Threshold for the corner response; tune as needed
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,255 )
return color_img, corner_list
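# The response computed above is the standard Harris measure
# R = det(M) - k * trace(M)**2, evaluated on the summed structure tensor of each
# window; pixels whose response exceeds the threshold are recorded in
# `corner_list` and painted red in the returned image.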
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = HarrisCorner(0.04, 3)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 353
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_reformer"""] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_reformer_fast"""] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_reformer"""] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 193
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self : int ) -> Tuple:
_lowerCAmelCase = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_lowerCAmelCase = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
_lowerCAmelCase = model(__snake_case )["""last_hidden_state"""]
_lowerCAmelCase = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , __snake_case )
# compare the actual values for a slice.
_lowerCAmelCase = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 70
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
| 313
| 0
|
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
lowerCamelCase = str(bin(lowerCamelCase__ ) )[2:] # remove the leading "0b"
lowerCamelCase = str(bin(lowerCamelCase__ ) )[2:] # remove the leading "0b"
lowerCamelCase = max(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase__ ) , b_binary.zfill(lowerCamelCase__ ) ) )
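# Worked examples (referring to the function above as `binary_xor` purely for
# illustration); both operands are zero-padded to a common width before the
# digit-wise comparison:
#
#   >>> binary_xor(25, 32)   # 0b011001 ^ 0b100000
#   '0b111001'
#   >>> binary_xor(37, 50)   # 0b100101 ^ 0b110010
#   '0b010111'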
if __name__ == "__main__":
import doctest
doctest.testmod()
| 66
|
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = []
lowerCamelCase = 1
while len(lowerCamelCase__ ) < 1E6:
constant.append(str(lowerCamelCase__ ) )
i += 1
lowerCamelCase = """""".join(lowerCamelCase__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
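# This is Project Euler problem 40: concatenate the positive integers to build
# Champernowne's constant 0.123456789101112... to at least one million digits,
# then multiply the digits d1 * d10 * d100 * ... * d1000000 (hence the 0-based
# indices 0, 9, 99, ..., 999999 above). The well-known answer is 210.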
if __name__ == "__main__":
print(solution())
| 66
| 1
|
'''simple docstring'''
from datetime import datetime
import requests
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__lowercase = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(A__ ).content
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter Video/IGTV url: ''').strip()
lowerCAmelCase__ = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f'Done. Video saved to disk as {file_name}.')
| 104
|
"""simple docstring"""
_UpperCamelCase: Dict = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
_UpperCamelCase: Optional[int] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
_UpperCamelCase: int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
_UpperCamelCase: List[str] = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
_UpperCamelCase: Any = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
_UpperCamelCase: str = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
_UpperCamelCase: Optional[Any] = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
_UpperCamelCase: Optional[int] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 255
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE_ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class a ( UpperCAmelCase ):
_lowercase = "facebook/nllb-200-distilled-600M"
_lowercase = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
_lowercase = "translator"
_lowercase = AutoTokenizer
_lowercase = AutoModelForSeqaSeqLM
_lowercase = LANGUAGE_CODES
_lowercase = ["text", "text", "text"]
_lowercase = ["text"]
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
_UpperCAmelCase : int = self.lang_to_code[src_lang]
_UpperCAmelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
A_ , return_tensors="pt" , src_lang=A_ , tgt_lang=A_ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
return self.model.generate(**A_ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=A_ )
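# A hypothetical usage sketch for the translation tool above. The class and
# method names here are illustrative (the three methods above correspond to the
# usual encode/forward/decode hooks of `PipelineTool`), so treat this as the
# shape of the workflow rather than the exact API:
#
#   tool = TranslationTool()
#   inputs = tool.encode("Bonjour, le monde !", "French", "English")
#   outputs = tool.forward(inputs)
#   print(tool.decode(outputs))  # e.g. "Hello, world!"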
| 369
|
'''simple docstring'''
def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : Optional[int] ) -> Any:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__lowercase , n - 1 , __lowercase ) * a) % mod
else:
        __UpperCamelCase = binary_exponentiation(__lowercase , n // 2 , __lowercase )  # integer halving keeps n an int
return (b * b) % mod
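# Worked examples for binary_exponentiation(a, n, mod), which computes
# a**n % mod in O(log n) multiplications:
#
#   >>> binary_exponentiation(3, 5, 7)      # 3**5 = 243 and 243 % 7 = 5
#   5
#   >>> binary_exponentiation(2, 10, 1000)  # 2**10 = 1024 and 1024 % 1000 = 24
#   24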
# a prime number
a__ : Tuple =701
a__ : Union[str, Any] =1_000_000_000
a__ : List[Any] =10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 53
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__snake_case :Optional[int] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__snake_case :List[str] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__snake_case :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] )
return (item, float(_UpperCAmelCase ))
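# A quick worked example for the fitness function above (it counts positionwise
# matches between the candidate and the target):
#
#   >>> evaluate("Helxo Worlx", "Hello World")
#   ('Helxo Worlx', 9.0)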
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 )
__a = parent_a[:random_slice] + parent_a[random_slice:]
__a = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(_UpperCAmelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__a = random.choice(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = []
# Generate more children proportionally to the fitness score.
__a = int(parent_a[1] * 100 ) + 1
__a = 10 if child_n >= 10 else child_n
for _ in range(_UpperCAmelCase ):
__a = population_score[random.randint(0 , _UpperCAmelCase )][0]
__a , __a = crossover(parent_a[0] , _UpperCAmelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) )
return pop
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
__a = f'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(_UpperCAmelCase )
    # Verify that the target contains no genes besides the ones inside the genes variable.
__a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__a = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(_UpperCAmelCase )
# Generate random starting population.
__a = []
for _ in range(_UpperCAmelCase ):
population.append(''''''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) )
    # Just some logs to know what the algorithm is doing.
__a , __a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCAmelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__a = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]
# Check if there is a matching evolution.
__a = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
    # Keeping some of them avoids regression of the evolution.
__a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCAmelCase )
# Normalize population score to be between 0 and 1.
__a = [
(item, score / len(_UpperCAmelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCAmelCase ):
population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
    # forever to compute large strings, but will also calculate small strings in
    # far fewer generations.
if len(_UpperCAmelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case :Optional[int] = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__snake_case :List[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__snake_case ,__snake_case ,__snake_case :Dict = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 49
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase =random.Random()
if is_torch_available():
import torch
def _A ( _a : int , _a : Optional[int]=1.0 , _a : List[str]=None , _a : List[str]=None ):
"""simple docstring"""
if rng is None:
A = global_rng
A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=7 ,lowerCamelCase_=4_0_0 ,lowerCamelCase_=2_0_0_0 ,lowerCamelCase_=1 ,lowerCamelCase_=0.0 ,lowerCamelCase_=1_6_0_0_0 ,lowerCamelCase_=True ,lowerCamelCase_=True ,) -> Optional[int]:
A = parent
A = batch_size
A = min_seq_length
A = max_seq_length
A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A = feature_size
A = padding_value
A = sampling_rate
A = return_attention_mask
A = do_normalize
def UpperCamelCase__ ( self ) -> Any:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self ,lowerCamelCase_=False ,lowerCamelCase_=False ) -> List[str]:
def _flatten(lowerCamelCase_ ):
return list(itertools.chain(*__snake_case ) )
if equal_length:
A = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
A = [np.asarray(__snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase__ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = ASTFeatureExtractor
def UpperCamelCase__ ( self ) -> Tuple:
A = ASTFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) -> str:
        # Test that all calls wrap to encode_plus and batch_encode_plus
A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A = [floats_list((1, x) )[0] for x in range(8_0_0 ,1_4_0_0 ,2_0_0 )]
A = [np.asarray(__snake_case ) for speech_input in speech_inputs]
# Test not batched input
A = feat_extract(speech_inputs[0] ,return_tensors="""np""" ).input_values
A = feat_extract(np_speech_inputs[0] ,return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
# Test batched
A = feat_extract(__snake_case ,padding=__snake_case ,return_tensors="""np""" ).input_values
A = feat_extract(__snake_case ,padding=__snake_case ,return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__snake_case ,__snake_case ):
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A = np.asarray(__snake_case )
A = feat_extract(__snake_case ,return_tensors="""np""" ).input_values
A = feat_extract(__snake_case ,return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__snake_case ,__snake_case ):
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
@require_torch
def UpperCamelCase__ ( self ) -> Union[str, Any]:
import torch
A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A = np.random.rand(1_0_0 ).astype(np.floataa )
A = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A = feature_extractor.pad([{"""input_values""": inputs}] ,return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A = feature_extractor.pad([{"""input_values""": inputs}] ,return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Any:
from datasets import load_dataset
A = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
A = ds.sort("""id""" ).select(range(__snake_case ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase__ ( self ) -> Any:
# fmt: off
A = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
A = self._load_datasamples(1 )
A = ASTFeatureExtractor()
A = feature_extractor(__snake_case ,return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape ,(1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] ,__snake_case ,atol=1E-4 ) )
| 370
|
"""simple docstring"""
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
A = 0
while n > 0:
res += n % 1_0
n //= 1_0
return res
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def _A ( _a : int ):
"""simple docstring"""
    return sum(int(c ) for c in str(abs(_a ) ) )
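# Worked examples; all three variants agree for any integer, including
# negatives, since each starts from abs(n):
#
#   >>> sum_of_digits(262144)            # 2 + 6 + 2 + 1 + 4 + 4
#   19
#   >>> sum_of_digits_recursion(-9875)   # 9 + 8 + 7 + 5
#   29
#   >>> sum_of_digits_compact(0)
#   0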
def _A ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_a : Callable , _a : int ) -> None:
A = f'{func.__name__}({value})'
A = timeit(f'__main__.{call}' , setup="""import __main__""" )
print(f'{call:56} = {func(_a )} -- {timing:.4f} seconds' )
for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 77
| 0
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowercase__ : Tuple = datasets.load_iris()
lowercase__ : Union[str, Any] = np.array(data['data'])
lowercase__ : List[str] = np.array(data['target'])
lowercase__ : Tuple = data['target_names']
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = train_test_split(X, y)
def a__ ( lowercase : Optional[Any], lowercase : int ) -> Dict:
"""simple docstring"""
return np.linalg.norm(np.array(_SCREAMING_SNAKE_CASE ) - np.array(_SCREAMING_SNAKE_CASE ) )
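# A quick sanity check for the distance helper (a classic 3-4-5 triangle):
#
#   >>> euclidean_distance([0, 0], [3, 4])
#   5.0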
def a__ ( lowercase : List[str], lowercase : Optional[Any], lowercase : List[str], lowercase : Union[str, Any], lowercase : Optional[int]=5 ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = zip(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
# List of distances of all points from the point to be classified
_UpperCamelCase = []
for data_point in data:
_UpperCamelCase = euclidean_distance(data_point[0], _SCREAMING_SNAKE_CASE )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
_UpperCamelCase = [i[1] for i in sorted(_SCREAMING_SNAKE_CASE )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
_UpperCamelCase = Counter(_SCREAMING_SNAKE_CASE ).most_common(1 )[0][0]
return classes[result]
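# Note: `k` defaults to 5; an odd `k` avoids ties in the two-class majority vote.
# The demo below classifies a hand-picked iris measurement; the exact label can
# vary with the random train/test split above, but a sample with such a short
# petal is normally classified as 'setosa'.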
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 324
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = VideoToVideoSDPipeline
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase_ = False
# No `output_type`.
UpperCAmelCase_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_guided_video_to_video(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 153
| 0
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase__ = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return one model, one DDP-wrapped copy, and a prepared dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
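    # Editor's note (assumption: the launch instructions are not shown in this
    # excerpt): like other accelerate test scripts, this module is meant to be
    # launched once per process, e.g.
    #   accelerate launch --num_processes 2 test_metrics.py
    # where the script filename is hypothetical.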
| 244
|
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print some example conversions to octal."""
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
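    # Editor's addition: cross-check the hand-rolled conversion against the
    # built-in oct(), which returns the same "0o"-prefixed form.
    assert decimal_to_octal(216) == oct(216) == "0o330"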
| 244
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    the whitespace/control characters that would break the BPE code.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
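

# Editor's addition: a tiny self-check of the mapping above, kept out of the
# import path; it is a bijection over all 256 byte values and printable ASCII
# maps to itself.
def _bytes_to_unicode_sketch_check():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256
    assert byte_encoder[ord("A")] == "A"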
def get_pairs(word):
    """Return the set of symbol pairs in a word (word as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else bos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else eos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else sep_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else cls_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else unk_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, **lowerCAmelCase__, )
with open(lowerCAmelCase__, encoding='utf-8') as vocab_handle:
snake_case_ = json.load(lowerCAmelCase__)
snake_case_ = {v: k for k, v in self.encoder.items()}
snake_case_ = errors # how to handle errors in decoding
snake_case_ = bytes_to_unicode()
snake_case_ = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__, encoding='utf-8') as merges_handle:
snake_case_ = merges_handle.read().split('\n')[1:-1]
snake_case_ = [tuple(merge.split()) for merge in bpe_merges]
snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
snake_case_ = {}
snake_case_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 69
|
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
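

# Editor's addition: a quick worked check. 7 * 103 = 721 = 3 * 240 + 1, so 103
# is the inverse of 7 modulo 240.
if __name__ == "__main__":
    assert find_mod_inverse(7, 240) == 103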
| 69
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Load visual-genome class and attribute names from the bundled files."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """Load a pickled checkpoint and convert numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class _A :
"""simple docstring"""
UpperCAmelCase : int = {}
def __init__( self : Any , __UpperCAmelCase : dict , __UpperCAmelCase : str = "root" , __UpperCAmelCase : Optional[int]=0):
a : List[str] = name
a : Tuple = level
a : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
a : List[Any] = copy.deepcopy(__UpperCAmelCase)
a : int = copy.deepcopy(__UpperCAmelCase)
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Union[str, Any] = Config(__UpperCAmelCase , name=__UpperCAmelCase , level=level + 1)
a : Dict = v
setattr(self , __UpperCAmelCase , __UpperCAmelCase)
a : Tuple = d
def __repr__( self : List[str]):
return str(list((self._pointer.keys())))
def __setattr__( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Tuple):
a : Optional[Any] = val
a : Tuple = val
a : Dict = key.split(".")
a : Union[str, Any] = len(__UpperCAmelCase) - 1
a : Optional[int] = self._pointer
if len(__UpperCAmelCase) > 1:
for i, l in enumerate(__UpperCAmelCase):
if hasattr(self , __UpperCAmelCase) and isinstance(getattr(self , __UpperCAmelCase) , __UpperCAmelCase):
setattr(getattr(self , __UpperCAmelCase) , ".".join(levels[i:]) , __UpperCAmelCase)
if l == last_level:
a : int = val
else:
a : str = pointer[l]
def __snake_case ( self : str):
return self._pointer
def __snake_case ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any]):
with open(f'''{file_name}''' , "w") as stream:
dump(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : int):
with open(f'''{file_name}''' , "w") as stream:
json.dump(__UpperCAmelCase , __UpperCAmelCase)
@staticmethod
def __snake_case ( __UpperCAmelCase : Dict):
with open(__UpperCAmelCase) as stream:
a : List[str] = load(__UpperCAmelCase , Loader=__UpperCAmelCase)
return data
def __str__( self : Tuple):
a : str = " "
if self._name != "root":
a : List[str] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
a : Optional[Any] = ""
a : List[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(__UpperCAmelCase).__name__})\n'''
a : Tuple = level
return r[:-1]
@classmethod
def __snake_case ( cls : str , __UpperCAmelCase : str , **__UpperCAmelCase : List[Any]):
a , a : Tuple = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
return cls(__UpperCAmelCase)
@classmethod
def __snake_case ( cls : Union[str, Any] , __UpperCAmelCase : str , **__UpperCAmelCase : List[str]):
a : int = kwargs.pop("cache_dir" , __UpperCAmelCase)
a : List[Any] = kwargs.pop("force_download" , __UpperCAmelCase)
a : Optional[int] = kwargs.pop("resume_download" , __UpperCAmelCase)
a : Tuple = kwargs.pop("proxies" , __UpperCAmelCase)
a : int = kwargs.pop("local_files_only" , __UpperCAmelCase)
if os.path.isdir(__UpperCAmelCase):
a : Union[str, Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase)
elif os.path.isfile(__UpperCAmelCase) or is_remote_url(__UpperCAmelCase):
a : List[Any] = pretrained_model_name_or_path
else:
a : int = hf_bucket_url(__UpperCAmelCase , filename=__UpperCAmelCase , use_cdn=__UpperCAmelCase)
try:
# Load from URL or cache if already cached
a : Optional[Any] = cached_path(
__UpperCAmelCase , cache_dir=__UpperCAmelCase , force_download=__UpperCAmelCase , proxies=__UpperCAmelCase , resume_download=__UpperCAmelCase , local_files_only=__UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
a : Union[str, Any] = Config.load_yaml(__UpperCAmelCase)
except EnvironmentError:
a : str = "Can't load config for"
raise EnvironmentError(__UpperCAmelCase)
if resolved_config_file == config_file:
print("loading configuration file from path")
else:
print("loading configuration file cache")
return Config.load_yaml(__UpperCAmelCase), kwargs
def compare(in_tensor):
    """Compare a tensor element-wise against the reference stored in dump.pt."""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
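

# Editor's note: legacy model ids (no "/") hyphenate the filename, e.g.
#   hf_bucket_url("bert-base-uncased", "config.json", use_cdn=False)
#   -> "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json"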
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None, ):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ) -> str:
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    """Hash a url (and optional etag) into a stable cache filename."""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ) -> str:
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def lowercase ( A_ , A_="," )-> Union[str, Any]:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
with open(A_ ) as f:
a : str = eval(f.read() )
else:
a : List[Any] = requests.get(A_ )
try:
a : Any = requests.json()
except Exception:
a : Any = req.content.decode()
assert data is not None, "could not connect"
try:
a : Optional[Any] = eval(A_ )
except Exception:
a : Any = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """Download a pickled checkpoint (if needed) and convert it to torch tensors."""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowercase ( A_ , A_="RGB" )-> Any:
'''simple docstring'''
assert isinstance(A_ , A_ )
if os.path.isfile(A_ ):
a : Dict = cva.imread(A_ )
else:
a : Union[str, Any] = get_image_from_url(A_ )
assert img is not None, F'''could not connect to: {im}'''
a : int = cva.cvtColor(A_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a : List[str] = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
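

if __name__ == "__main__":
    # Editor's addition: quick demonstration of chunk() batching.
    assert list(chunk([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]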
| 226
| 0
|
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """
    For each index i, z_result[i] is the length of the longest substring
    starting at i that matches a prefix of input_str.
    """
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring equal
        # to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
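    # Editor's addition: small demonstration of both entry points.
    print(z_function("abracadabra"))  # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern("abr", "abracadabra"))  # prints 2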
| 276
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
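

# Editor's sketch (illustrative, not part of the original module): the default
# constructor mirrors the `weiweishi/roc-bert-base-zh` hyper-parameters and any
# field can be overridden by keyword, e.g.:
#
#     config = RoCBertConfig(num_hidden_layers=6, enable_shape=False)
#     assert config.model_type == "roc_bert"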
| 276
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None, ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )
            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 356
|
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> list:
if any(not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(lowerCAmelCase_ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowerCAmelCase_ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
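    # Editor's addition: one more demonstration; each pass of the outer loop
    # lets one "bead" settle, so the double loop is O(n^2) comparisons and the
    # algorithm only accepts non-negative integers.
    print(bead_sort([9, 2, 5, 1]))  # [1, 2, 5, 9]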
| 107
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"""Pregnancy""": 168629,
"""Christianity""": 7675,
"""Explain""": 106423,
"""Fitness""": 63440,
"""Saving""": 63163,
"""Ask""": 27171,
"""Ass""": 95985,
"""Joke""": 163509,
"""Questions""": 45622,
"""Thoughts""": 49605,
"""Retail""": 52342,
"""Feminism""": 164338,
"""Writing""": 11992,
"""Atheism""": 192263,
"""Netflix""": 48616,
"""Computing""": 39639,
"""Opinion""": 43213,
"""Alone""": 44967,
"""Funny""": 58917,
"""Gaming""": 40358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 77138,
"""Diet""": 36206,
"""Legal""": 11859,
"""Norman""": 4939,
"""Tip""": 72689,
"""Weight""": 52343,
"""Movies""": 46273,
"""Running""": 23425,
"""Science""": 2090,
"""Horror""": 37793,
"""Confession""": 60572,
"""Finance""": 12250,
"""Politics""": 16360,
"""Scary""": 191985,
"""Support""": 12654,
"""Technologies""": 32516,
"""Teenage""": 66160,
"""Event""": 32769,
"""Learned""": 67460,
"""Notion""": 182770,
"""Wikipedia""": 37583,
"""Books""": 6665,
"""Extract""": 76050,
"""Confessions""": 102701,
"""Conspiracy""": 75932,
"""Links""": 63674,
"""Narcissus""": 150425,
"""Relationship""": 54766,
"""Relationships""": 134796,
"""Reviews""": 41671,
"""News""": 4256,
"""Translation""": 26820,
"""multilingual""": 128406,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word (word as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
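

# Editor's addition: tiny sanity check for get_pairs, kept out of the import path.
def _get_pairs_sketch_check():
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}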
class CTRLTokenizer(PreTrainedTokenizer):
    """
    CTRL tokenizer, based on byte-pair encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # merges are written in rank order, i.e. sorted by kv[1]
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
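# Usage sketch (my addition; the file paths and the example split are hypothetical):
#   tokenizer = CTRLTokenizer("vocab.json", "merges.txt")
#   tokenizer._tokenize("adaptations")                         # e.g. ["adapt@@", "ations"]
#   tokenizer.convert_tokens_to_string(["adapt@@", "ations"])  # "adaptations"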
| 14
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**input)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 239
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
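# Round-trip sketch (my addition): the dict above mirrors MobileViTImageProcessor's
# constructor arguments, so it can be splatted straight into the processor, e.g.
#   processor = MobileViTImageProcessor(**tester.prepare_image_processor_dict())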
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
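# Shape sanity note (my addition): with the tester defaults above, a single image
# encodes to pixel_values of shape (1, 3, 18, 18) and a full batch to (7, 3, 18, 18).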
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
                 intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
                 max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
                 bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
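# Instantiation sketch (my addition; values are the defaults above):
#   config = GPTNeoXJapaneseConfig()
#   config.hidden_size // config.num_attention_heads  # 2560 // 32 = 80 dims per head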
| 31
| 1
|
lowercase_ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: when the expression is consumed, the operand stack holds the result
    return operand_stack.peek()
if __name__ == "__main__":
lowercase_ = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 7
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
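# A closed-form cross-check (my addition, not part of the original solution), using
# sum(1..n) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 100) -> int:
    """O(1) variant of solution(); for n=100 both return 25164150."""
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6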
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
        'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSpeech2TextForConditionalGeneration',
        'TFSpeech2TextModel',
        'TFSpeech2TextPreTrainedModel',
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_to_text'] = [
        'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Speech2TextForConditionalGeneration',
        'Speech2TextModel',
        'Speech2TextPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
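# Usage note (my addition): with the lazy module installed above, heavyweight imports
# such as torch or TensorFlow are deferred until an attribute (e.g. Speech2TextModel)
# is first accessed, keeping `import transformers` itself cheap.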
| 371
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
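# Note (my addition): every key above must be set to a non-default value so the
# completeness check in ConfigTestUtils below can detect accidental collisions with
# PretrainedConfig's defaults.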
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 57
| 0
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range, down_ops=self.down_ops,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")
                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 210
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
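    # Note (my addition): with the default conv_stride (5, 2, 2, 2, 2, 2, 2),
    # inputs_to_logits_ratio evaluates to 5 * 2**6 = 320, i.e. one encoder frame
    # per 320 waveform samples (20 ms at a 16 kHz sampling rate).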
| 210
| 1
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
UpperCAmelCase_ : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
UpperCAmelCase_ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ : Optional[int] = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
UpperCAmelCase_ : int = super(__snake_case , self )._flatten_past_key_values_(
__snake_case , __snake_case , __snake_case , __snake_case )
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
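# Note: this maps a requested pixel size to the MoVQ latent grid size, e.g. with the
# default scale_factor=8 a 768x768 request becomes a 96x96 latent; sizes that are not
# multiples of scale_factor**2 (64) are rounded up to the next multiple.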
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for Kandinsky 2.2: denoises MoVQ latents conditioned on image embeddings."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, moving each to GPU only when its forward runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with hooks, loading each onto the GPU just before use."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device on which the pipeline's models actually execute."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 23
| 0
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum all numbers equal to the sum of the factorials of their digits (Project Euler 34)."""
    # 7 * 9! + 1 is an upper bound: an 8-digit number's digit-factorial sum is at
    # most 8 * 9!, which has only 7 digits, so no larger number can qualify.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
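# Sanity check: 1! + 4! + 5! = 1 + 24 + 120 = 145, so 145 is one of the two numbers
# (the other being 40585) counted by solution() above.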
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A : Tuple = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)."""
    # Each CSV row: a story id, four story sentences (columns 1-4), two candidate
    # endings (columns 5-6) and a 1-based correct-ending label in the last column.
    with open(dataset_path, encoding="utf_8") as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the first line
        for line in tqdm(f_csv):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label).

    Produces inputs of shape (n_batch, 2, input_len) where each continuation is encoded as
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token].
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=__a , default="""""" )
parser.add_argument("""--eval_dataset""" , type=__a , default="""""" )
parser.add_argument("""--seed""" , type=__a , default=4_2 )
parser.add_argument("""--num_train_epochs""" , type=__a , default=3 )
parser.add_argument("""--train_batch_size""" , type=__a , default=8 )
parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=__a , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=__a , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=__a , default=0.01 )
parser.add_argument("""--lm_coef""" , type=__a , default=0.9 )
parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 )
parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" )
A__ = parser.parse_args()
print(__a )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A__ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(__a , __a ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ["""_start_""", """_delimiter_""", """_classify_"""]
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
A__ = tokenizer.convert_tokens_to_ids(__a )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
def tokenize_and_encode(__a :Tuple ):
if isinstance(__a , __a ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) )
elif isinstance(__a , __a ):
return obj
return [tokenize_and_encode(__a ) for o in obj]
logger.info("""Encoding dataset...""" )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(__a )
# Compute the max input length for the Transformer
A__ = model.config.n_positions // 2 - 2
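# Each encoded segment is capped at half the model context (minus 2); the "+ 3"
# below accounts for the start, delimiter and classify special tokens.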
A__ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(__a , __a , __a , *__a )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*__a )
A__ = RandomSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
A__ = TensorDataset(*__a )
A__ = SequentialSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A__ = args.max_steps
A__ = args.max_steps // (len(__a ) // args.gradient_accumulation_steps) + 1
else:
A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs
A__ = list(model.named_parameters() )
A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
A__ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon )
A__ = get_linear_schedule_with_warmup(
__a , num_warmup_steps=args.warmup_steps , num_training_steps=__a )
if args.do_train:
A__ , A__ , A__ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
A__ = 0
A__ = 0
A__ = tqdm(__a , desc="""Training""" )
for step, batch in enumerate(__a ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
A__ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A__ = os.path.join(args.output_dir , __a )
A__ = os.path.join(args.output_dir , __a )
torch.save(model_to_save.state_dict() , __a )
model_to_save.config.to_json_file(__a )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__a )
if args.do_eval:
model.eval()
A__ , A__ = 0, 0
A__ , A__ = 0, 0
for batch in tqdm(__a , desc="""Evaluating""" ):
A__ = tuple(t.to(__a ) for t in batch )
A__ , A__ , A__ , A__ = batch
with torch.no_grad():
A__ , A__ , A__ , A__ = model(
__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a )
A__ = mc_logits.detach().cpu().numpy()
A__ = mc_labels.to("""cpu""" ).numpy()
A__ = accuracy(__a , __a )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
A__ = eval_loss / nb_eval_steps
A__ = eval_accuracy / nb_eval_examples
A__ = tr_loss / nb_tr_steps if args.do_train else None
A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
A__ = os.path.join(args.output_dir , """eval_results.txt""" )
with open(__a , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , __a , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 274
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Transition graph for a Markov chain, stored as node -> {successor: probability}."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        # Roulette-wheel selection: walk the cumulative distribution of the
        # outgoing edges until it exceeds the random draw.
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
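# A minimal usage sketch (the transition probabilities below are made up):
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     visits = get_transitions("a", transitions, 5_000)
# `visits` then approximates the chain's stationary distribution over {"a", "b"}.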
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
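# Parses DPR's biencoder training data into two aligned files: one question per
# line (the evaluation set) and the tab-separated titles of each question's
# positive contexts (the gold data).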
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
| 280
| 1
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2VecAudio model."""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
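# Note: `inputs_to_logits_ratio` is the total stride of the feature extractor, i.e. how
# many input samples map to one encoder frame; with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) this evaluates to 5 * 2**6 = 320.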
| 244
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 244
| 1
|
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
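# Example: mean([3, 6, 9]) == 6.0, while mean([]) raises ValueError.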
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 184
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[Any] =logging.get_logger(__name__)
a__ : Tuple ={
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a FocalNet model."""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
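# A minimal usage sketch (the chosen stages are illustrative):
#     config = FocalNetConfig(out_features=["stage1", "stage4"])
#     config.stage_names  # ["stem", "stage1", "stage2", "stage3", "stage4"]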
| 53
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 53
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = True , lowercase = 1 / 255 , lowercase = None , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Any = size if size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : List[str] = get_size_dict(lowercase )
_lowerCamelCase : int = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : List[Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size' )
_lowerCamelCase : List[Any] = do_resize
_lowerCamelCase : str = do_rescale
_lowerCamelCase : Optional[Any] = do_normalize
_lowerCamelCase : Optional[int] = do_center_crop
_lowerCamelCase : Union[str, Any] = crop_size
_lowerCamelCase : Any = size
_lowerCamelCase : Union[str, Any] = resample
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCamelCase : Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ):
_lowerCamelCase : Optional[Any] = get_size_dict(lowercase )
if "shortest_edge" in size:
_lowerCamelCase : List[str] = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCamelCase : Optional[Any] = (size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
_lowerCamelCase : int = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Tuple = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase )
_lowerCamelCase : Any = resample if resample is not None else self.resample
_lowerCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Dict = size if size is not None else self.size
_lowerCamelCase : List[str] = get_size_dict(lowercase )
if not is_batched(lowercase ):
_lowerCamelCase : List[Any] = [images]
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_lowerCamelCase : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
_lowerCamelCase : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_lowerCamelCase : List[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : List[Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 355
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 12
| 0
|
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # The matrix must be square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
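# Usage sketch: for a real symmetric (or complex Hermitian) matrix `a` and any nonzero
# starting vector `v`, power_iteration(a, v) approximates the dominant eigenvalue and
# its eigenvector; test_power_iteration() below cross-checks against np.linalg.eigh.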
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in
        # eigh (eigh is used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 52
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
UpperCamelCase : List[Any] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.float32 ) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=A_ )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase : Any = np.zeros(5 , dtype=np.float32 )
UpperCamelCase : Optional[Any] = 1
UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.float32 )[::-1]
UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dict = faiss.IndexFlat(5 )
UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase : str = np.zeros(5 , dtype=np.float32 )
UpperCamelCase : int = 1
UpperCamelCase , UpperCamelCase : Dict = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def A_ ( _lowerCAmelCase ) -> Optional[int]:
import faiss
UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
UpperCamelCase : List[Any] = "index.faiss"
UpperCamelCase : List[str] = F"""mock://{index_name}"""
index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = np.zeros(5 , dtype=np.float32 )
UpperCamelCase : Optional[int] = 1
UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A__ (TestCase):
    def test_elasticsearch(self):
        '''simple docstring'''
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 52
| 1
|
from math import factorial
def solution(num: int = 1_0_0) -> int:
    """simple docstring"""
    return sum(int(x) for x in str(factorial(num)))
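# Quick sanity check (a hypothetical call, assuming the function above):
# solution(10) == 27, since 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.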
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "ibert"

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
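# Minimal usage sketch (assuming the two classes above; the non-default value is illustrative):
# config = IBertConfig(quant_mode=True)   # enable integer-only quantization mode
# onnx_config = IBertOnnxConfig(config)   # exposes dynamic axes for input_ids / attention_mask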
| 315
| 0
|
"""simple docstring"""
import argparse
import os
import re
lowercase_ = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowercase_ = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
lowercase_ = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname: str , overwrite: bool = False):
    with open(fname , '''r''' , encoding='''utf-8''') as f:
        content = f.read()

    lines = content.split('''\n''')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r'''^(\s*)\S''' , lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(''' ''' * indent + '''('''):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(''' ''' * indent + ''')'''):
                        line_idx += 1
                    blocks.append('''\n'''.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''') as f:
            f.write('''\n'''.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('''.py''')]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames , diffs) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix'''
            ''' this.''' )
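# Example (hypothetical invocation, assuming the helpers above): running
#     sort_auto_mapping("src/transformers/models/auto/modeling_auto.py", overwrite=True)
# rewrites the file in place so that entries such as ("albert", ...) and ("bert", ...)
# inside each *_MAPPING OrderedDict end up in alphabetical order of their identifier.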
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 45
|
import os
def solution():
    '''simple docstring'''
    file_path = os.path.join(os.path.dirname(__file__) , '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 30
| 0
|
import os
_SCREAMING_SNAKE_CASE = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ''
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
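# Round-trip sanity check (hypothetical calls, assuming the two converters above):
# parse_roman_numerals("MCMXC") == 1990 and generate_roman_numerals(1990) == "MCMXC";
# non-minimal forms shrink: "IIII" parses to 4, which regenerates as the shorter "IV".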
def SCREAMING_SNAKE_CASE__ ( __a = "/p089_roman.txt" ):
snake_case_ : Optional[int] = 0
with open(os.path.dirname(__a ) + roman_numerals_filename ) as filea:
snake_case_ : List[Any] = filea.readlines()
for line in lines:
snake_case_ : Optional[int] = line.strip()
snake_case_ : Dict = parse_roman_numerals(__a )
snake_case_ : List[str] = generate_roman_numerals(__a )
savings += len(__a ) - len(__a )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 88
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 9_99_99_99_99
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 9_99_99_99_99

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            elapsed_time = finish_time - arrival_time[short]
            waiting_time[short] = elapsed_time - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
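# Worked example (hypothetical input, assuming the function above): with
# arrival_time = [0, 0] and burst_time = [2, 1], SRTF runs process 2 first (t=0..1) and
# then process 1 (t=1..3), so calculate_waitingtime([0, 0], [2, 1], 2) == [1, 0].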
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""")
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )

    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
| 88
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert'''] = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_albert_fast'''] = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_albert'''] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_albert'''] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_albert'''] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104
|
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
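# Quick check (assuming the function above): 2**5 - 1 = 31 is prime, so
# lucas_lehmer_test(5) is True, while lucas_lehmer_test(11) is False because
# 2**11 - 1 = 2047 = 23 * 89.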
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 284
| 0
|
import os
import pytest
from attr import dataclass
__lowerCamelCase = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 5_00,
"save_steps": 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 10_00}
@property
    def metric_definitions(self) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        '''simple docstring'''
        return f"""{self.framework}-transformers-test"""
@property
    def test_path(self) -> str:
'''simple docstring'''
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 368
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = """."""
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = """\n""".join(non_existent_paths)
raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 10
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2
|
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float , point_y: float , incoming_gradient: float) -> tuple[float, float, float]:
    # normal gradient = y / 4x
    normal_gradient = point_y / 4 / point_x
    # sine and cosine of twice the normal's angle, used to reflect the incoming beam
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4 , first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (1_0.1 - point_y) / (0.0 - point_x)

    while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient)
        num_reflections += 1
    return num_reflections
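# Context (assuming the helpers above): solution() counts the laser reflections inside the
# ellipse 4x^2 + y^2 = 100, starting at (1.4, -9.6), until the beam exits through the gap
# -0.01 <= x <= 0.01 at the top; Project Euler 144 expects the count 354.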
if __name__ == "__main__":
print(F"{solution() = }")
| 23
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : List[str] = {}
class a__ (PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.0_2 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self) -> None:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type" , None)
        rope_scaling_factor = self.rope_scaling.get("factor" , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
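# Usage sketch (assuming the config class above; `a__` is this file's name for the Llama config):
# a__(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
# a__(rope_scaling={"type": "cubic", "factor": 2.0})    # raises ValueError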
| 353
|
def factorial(digit) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
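# Sanity check (hypothetical call, assuming the helpers above): 145 is a Krishnamurthy
# number because 1! + 4! + 5! == 1 + 24 + 120 == 145, so krishnamurthy(145) is True.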
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 180
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
            json.dump(image_processor_map , fp)
    def get_tokenizer(self , **kwargs) -> BertTokenizer:
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs)

    def get_image_processor(self , **kwargs) -> ViTImageProcessor:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self) -> None:
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self) -> None:
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , ViTImageProcessor)

    def test_image_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input , return_tensors='''np''')
        input_processor = processor(images=image_input , return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)

    def test_tokenizer(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])

    def test_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok , decoded_processor)

    def test_model_input_names(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 43
|
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class UpperCamelCase (datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self) -> datasets.DatasetInfo:
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self , dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files}))
        return splits
    def _cast_table(self , pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table) , type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self , files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file , 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch) , read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 172
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location='''cpu''')
    return sd


def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix):
    """simple docstring"""
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path , pytorch_dump_folder_path):
    """simple docstring"""
    assert (
        checkpoint_path.split('''/''')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1_024}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048, '''num_labels''': 3_129}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1_024,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict , config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 278
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A = logging.get_logger(__name__)
class lowercase_ (DeformableDetrImageProcessor):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 278
| 1
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type):
    '''simple docstring'''
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer , attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model , hf_model):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' ,)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm):
    '''simple docstring'''
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None):
    '''simple docstring'''
    # load the pre-trained fairseq checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['''cfg'''])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['''model'''])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model , hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 209
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase (PretrainedConfig):
    """simple docstring"""
    model_type = '''gpt_neox'''

    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type" , None)
        rope_scaling_factor = self.rope_scaling.get("factor" , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 79
| 0
|
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
SCREAMING_SNAKE_CASE : Optional[Any] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
SCREAMING_SNAKE_CASE : Union[str, Any] = "zero2"
SCREAMING_SNAKE_CASE : Optional[Any] = "zero3"
SCREAMING_SNAKE_CASE : int = [ZEROa, ZEROa]
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> Dict:
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__lowerCamelCase = parameterized.to_safe_name('''_'''.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
SCREAMING_SNAKE_CASE : List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ (TestCasePlus):
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self , stage: str , model: str):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )

    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self , stage: str , model: str):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )

    @parameterized.expand(params , name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self , stage: str , model: str):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )

    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self , stage: str , model: str):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )

    def do_checks(self , output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fp16: bool = True , ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self , stage: str , model_name: str , eval_steps: int = 10 , num_train_epochs: int = 1 , distributed: bool = True , fp16: bool = True , ) -> str:
        output_dir = self.get_auto_remove_tmp_dir('''./xxx''' , after=False)
        args = f'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()

        if fp16:
            args.extend(['''--fp16'''])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env())

        return output_dir

    def get_launcher(self , distributed: bool = False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2 , get_gpu_count()) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 362
|
def binary_and(a: int , b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary) , len(b_binary))

    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1'''))
        for char_a, char_b in zip(a_binary.zfill(max_len) , b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
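# A short usage sketch for binary_and; the example values are illustrative and
# not from the original module:
if __name__ == "__main__":
    assert binary_and(25, 32) == "0b000000"  # 25 & 32 == 0, padded to 6 bits
    assert binary_and(37, 50) == "0b100000"  # 37 & 50 == 32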
| 339
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
        _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
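# A sketch of what the lazy structure above buys (explanatory note, not part of
# the original file): attribute access triggers the real submodule import only
# on first use, so e.g.
#     from transformers.models.data2vec import Data2VecTextConfig
# loads configuration_data2vec_text on demand instead of importing all of the
# torch/TF modeling code at package-import time.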
| 298
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
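# A hedged end-to-end sketch mirroring the slow test above (downloads weights
# and needs a CUDA device; not part of the original test file):
#     pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
#     image = load_image(
#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png")
#     frames = pipe(image, guidance_scale=3.0, num_inference_steps=64,
#                   frame_size=64, output_type="np").images[0]  # (20, 64, 64, 3)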
| 12
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
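# The class above is the standard deprecation-shim pattern: subclass the
# replacement, warn once at construction, and delegate everything else. A
# generic sketch of the same idea (OldName/NewName are hypothetical):
#     class OldName(NewName):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
#             super().__init__(*args, **kwargs)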
| 355
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_inference_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def test_train_no_configs_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'), train_memory_csv_file=os.path.join(tmp_dir, 'train_mem.csv'), inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'), train_time_csv_file=os.path.join(tmp_dir, 'train_time.csv'), env_info_csv_file=os.path.join(tmp_dir, 'env.csv'), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())
    def test_trace_memory_line_by_line(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, 'log.txt'), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
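    # A minimal stand-alone sketch of the benchmark API exercised above (model
    # id and sizes mirror the tests; this is illustrative, not part of the file):
    #     args = PyTorchBenchmarkArguments(
    #         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    #         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    #     )
    #     results = PyTorchBenchmark(args).run()
    #     print(results.time_inference_result)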
| 11
| 0
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""")
| 173
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""")
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg", ):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 173
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2, )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ], )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
        dqa_pipeline = pipeline(
            "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
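    # A hedged usage sketch for the pipeline exercised above (downloads weights,
    # needs pytesseract for OCR; illustrative, not part of the original file):
    #     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    #     dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)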
| 70
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str] ) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs ) -> Formatter:
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 70
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords: torch.Tensor ) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
def lowercase__ ( self : Any , __snake_case : int , __snake_case : List[str] ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__snake_case , height=__snake_case , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size: int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
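# A small runnable sketch for the helpers above (the size and assertion are
# illustrative, not from the original module):
if __name__ == "__main__":
    demo_camera = create_pan_cameras(8)  # 20 poses orbiting the origin
    demo_rays = demo_camera.camera_rays  # per-pixel (origin, direction) pairs
    assert demo_rays.shape == (1, 20 * 8 * 8, 2, 3)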
| 70
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def mark_multiple(*keys: List[str] ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler(type ):
    def __new__(cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
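# A hedged sketch of how the pieces above combine (the Menu class is
# illustrative): `register` rebuilds a class through KeyHandler so methods
# tagged with @mark dispatch on key presses.
#     @register
#     class Menu:
#         @mark("j")
#         def down(cls):
#             return "down"
#     Menu.handle_input()  # blocks on get_character(), routes "j" to Menu.down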
| 58
| 0
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def get_parent_position(position: int ) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int ) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int ) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T] ):
"""simple docstring"""
    def __init__(self ):
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
def __len__( self ):
"""simple docstring"""
return self.elements
def __repr__( self ):
"""simple docstring"""
return str(self.heap )
    def is_empty(self ):
        return self.elements == 0
    def push(self , elem , weight ):
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min(self ):
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key(self , elem , weight ):
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up(self , elem ):
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down(self , elem ):
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes(self , node1_pos , node2_pos ):
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T] ):
"""simple docstring"""
    def __init__(self ):
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
def __repr__( self ):
"""simple docstring"""
return str(self.connections )
def __len__( self ):
"""simple docstring"""
return self.nodes
    def add_node(self , node ):
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self , node1 , node2 , weight ):
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(graph: GraphUndirectedWeighted[T] , ):
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
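# A runnable usage sketch for prims_algo (the toy graph is illustrative, not
# from the original module):
if __name__ == "__main__":
    demo_graph = GraphUndirectedWeighted[str]()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("a", "c", 15)
    demo_dist, demo_parent = prims_algo(demo_graph)
    assert demo_parent == {"a": None, "b": "a", "c": "b"}  # edges a-b and b-c form the MST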
| 359
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        object_detector = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self , object_detector , examples ):
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    "score": ANY(float ),
                    "label": ANY(str ),
                    "box": {"xmin": ANY(int ), "ymin": ANY(int ), "xmax": ANY(int ), "ymax": ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self ):
        pass
@require_torch
    def test_small_model_pt(self ):
        object_detector = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self ):
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self ):
        pass
@require_torch
@slow
    def test_threshold(self ):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=threshold , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self ):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection" )
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=top_k , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
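    # A hedged usage sketch mirroring the tests above (downloads the default
    # checkpoint; illustrative, not part of the original file):
    #     detector = pipeline("zero-shot-object-detection")
    #     detector(
    #         "http://images.cocodataset.org/val2017/000000039769.jpg",
    #         candidate_labels=["cat", "remote", "couch"], threshold=0.2, )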
| 133
| 0
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path , articles: list ):
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        '''simple docstring'''
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
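# For reference, a sketch of how a "--search" grid string like the one above expands
# into individual runs. The "key=v1:v2" convention mirrors run_eval_search; this tiny
# expander is illustrative, not the script's actual parser:
if __name__ == "__main__":
    from itertools import product

    search = "num_beams=1:2 length_penalty=0.9:1.0"
    keys, value_lists = zip(*(kv.split("=") for kv in search.split()))
    grid = [dict(zip(keys, combo)) for combo in product(*(v.split(":") for v in value_lists))]
    print(grid)  # 4 combinations: num_beams in {1, 2} x length_penalty in {0.9, 1.0}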
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """simple docstring"""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """simple docstring"""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected" , [
        (Features({"foo": Value("int32" )} ), None),
        (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def test_get_writer_batch_size(feature, expected):
    """simple docstring"""
    assert get_writer_batch_size(feature) == expected
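# End-to-end sketch of the parquet round trip covered by these tests, using the
# public `datasets` API (Dataset.to_parquet / Dataset.from_parquet); the file name
# is arbitrary:
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
    ds.to_parquet("tmp.parquet")
    reloaded = Dataset.from_parquet("tmp.parquet")
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]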
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
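# The launched script must expose a module-level `_mp_fn` entry point, mirroring the
# `mod._mp_fn` lookup above. A minimal training-script sketch (hypothetical file):
#
#     # train.py
#     def _mp_fn(index):
#         # `index` is the process index injected by xmp.spawn
#         print(f"hello from TPU process {index}")
#
# invoked as: python xla_spawn.py --num_cores 8 train.py --my_training_args ...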
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        """simple docstring"""
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
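# Shape sanity-check sketch for the module above (toy dimensions on CPU; the values
# are illustrative and not tied to any released checkpoint):
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=2048
    )
    hidden_states, time_embeds = proj(
        image_embeddings=torch.randn(2, 768),
        prompt_embeds=torch.randn(2, 768),
        text_encoder_hidden_states=torch.randn(2, 77, 768),
        do_classifier_free_guidance=False,
    )
    print(hidden_states.shape, time_embeds.shape)  # torch.Size([2, 81, 2048]) torch.Size([2, 1536])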
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
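# Usage sketch for the auto classes defined above ("t5-small" is just an illustrative
# Hub checkpoint; requires the Flax extras of `transformers`):
#
#     from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     inputs = tokenizer("translate English to German: Hello", return_tensors="np")
#     summary_ids = model.generate(inputs["input_ids"]).sequences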
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        '''simple docstring'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        '''simple docstring'''
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors='np')
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='max_length', return_tensors='np'
        )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='longest', return_tensors='np'
        )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding='longest', return_tensors='np'
        )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        '''simple docstring'''
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer')
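# Minimal feature-extraction sketch matching the class under test: a 16 kHz mono
# float waveform goes in, zero-mean/unit-variance `input_values` come out (toy
# silent input):
if __name__ == "__main__":
    raw_audio = [0.0] * 16000  # one second of silence at 16 kHz
    extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=False
    )
    features = extractor(raw_audio, sampling_rate=16000, return_tensors="np")
    print(features.input_values.shape)  # (1, 16000)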
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    '''simple docstring'''
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split('\n') for l in tags[1::2]])
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
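# For reference, the ProteinNet record layout the parser above expects (sketch;
# payload values abbreviated):
#
#     [PRIMARY]
#     MKVL...            one-letter amino-acid sequence
#     [TERTIARY]
#     x1 x2 x3 ...       three lines: x, y, z coordinates in picometers
#     y1 y2 y3 ...
#     z1 z2 z3 ...
#     [MASK]
#     +++--++...         '+' = resolved residue, '-' = missing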
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ['N/A']
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split('\n')
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ['N/A'])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [['N/A']]
    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"
    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ['X']
    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], 'UNK')
    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''
            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append('END')
    pdb_lines.append('')
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None, ) -> Protein:
    return Protein(
        aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index, )
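# Round-trip sketch: build a one-residue Protein and render it as PDB text (toy
# coordinates; only the backbone N, CA, C atoms are marked present):
if __name__ == "__main__":
    num_atoms = residue_constants.atom_type_num
    mask = np.zeros((1, num_atoms))
    mask[0, :3] = 1.0  # N, CA, C come first in residue_constants.atom_types
    prot = Protein(
        atom_positions=np.zeros((1, num_atoms, 3)),
        atom_mask=mask,
        aatype=np.zeros(1, dtype=np.int32),  # residue type 0 == alanine
        residue_index=np.array([1]),
        b_factors=np.zeros((1, num_atoms)),
    )
    print(to_pdb(prot))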
| 217
|
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0
def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
assert (
is_prime(SCREAMING_SNAKE_CASE__ ) and is_prime(SCREAMING_SNAKE_CASE__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase : List[str] = p_number_a + 1 # jump to the next number
lowercase : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
while number < p_number_a:
ans.append(SCREAMING_SNAKE_CASE__ )
number += 1
# fetch the next prime number.
while not is_prime(SCREAMING_SNAKE_CASE__ ):
number += 1
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and ans[0] != p_number_a
and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (n >= 1), "'n' must been int and >= 1"
lowercase : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(SCREAMING_SNAKE_CASE__ )
# precondition
assert ans[0] == 1 and ans[len(SCREAMING_SNAKE_CASE__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase : str = get_divisors(SCREAMING_SNAKE_CASE__ )
# precondition
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (divisors[0] == 1)
and (divisors[len(SCREAMING_SNAKE_CASE__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduces a fraction to lowest terms, returned as a (numerator, denominator) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
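

# Example: 10/20 reduces to 1/2.
assert simplify_fraction(10, 20) == (1, 2)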
def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
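

# Example: 5! = 120.
assert factorial(5) == 120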
def fib(n: int) -> int:
    """Returns the n-th Fibonacci number, with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
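

# fib(0) and fib(1) are both 1; the sequence then continues 2, 3, 5, 8, ...
assert [fib(i) for i in range(6)] == [1, 1, 2, 3, 5, 8]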
| 20
|
import argparse
import hashlib  # hashlib is only used by the test below, to cross-check the digest
import struct
class SHA1Hash:
    """Didactic SHA-1 implementation; mirrors hashlib.sha1 for bytes input."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotates the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pads the message to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data
    def split_blocks(self):
        """Splits the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expands a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        """Runs the 80-round compression over every block and returns the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324

def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
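
# Usage sketch: the class mirrors hashlib for bytes input, so
#
#   SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()
#
# holds; outside of teaching contexts, hashlib.sha1 is the right tool.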
| 212
| 0
|
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
_lowerCAmelCase : List[Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : Any = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : int = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
| 369
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 126
| 0
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 text decoder with a learned prefix, used for caption generation."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal.")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 149
|
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f"{key}\n{value}\n")
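
# The scraper depends on worldometers' current markup, so the CSS classes
# above may break without warning. A slightly more defensive fetch (sketch):
#
#   resp = requests.get("https://www.worldometers.info/coronavirus", timeout=10)
#   resp.raise_for_status()
#   soup = BeautifulSoup(resp.text, "html.parser")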
| 149
| 1
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(f'''{solution() = }''')
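
# Equivalent, arguably clearer formulation with math.prod (Python 3.8+):
#
#   from math import prod
#   max(prod(int(digit) for digit in N[i : i + 13]) for i in range(len(N) - 12))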
| 269
|
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
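

# A heap-backed sketch of the element queue above: heapq gives O(log n)
# enqueue/dequeue instead of the O(n) min()-scan. It reuses UnderFlowError
# from this module; the class name is illustrative.
import heapq


class HeapElementPriorityQueue:
    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, data: int) -> None:
        heapq.heappush(self._heap, data)

    def dequeue(self) -> int:
        if not self._heap:
            raise UnderFlowError("The queue is empty")
        return heapq.heappop(self._heap)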
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 269
| 1
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
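
# A vectorised numpy sketch of one batch-gradient step (illustrative, not
# part of the module's API): with a design matrix X carrying a leading
# column of ones, the loop above collapses to
#
#   theta <- theta - LEARNING_RATE * X.T @ (X @ theta - y) / m
#
#   X = numpy.array([[1, *x] for x, _ in train_data], dtype=float)
#   y = numpy.array([t for _, t in train_data], dtype=float)
#   theta = numpy.array(parameter_vector, dtype=float)
#   theta -= LEARNING_RATE * X.T @ (X @ theta - y) / m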
| 50
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
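
# Minimal usage sketch (downloads the public Hub checkpoint on first use):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")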
| 11
| 0
|
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """Returns the maximum product over all contiguous subarrays of 'numbers'."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
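

# Examples: the best subarray of [2, 3, -2, 4] is [2, 3] with product 6,
# and a zero resets the running products.
assert max_product_subarray([2, 3, -2, 4]) == 6
assert max_product_subarray([-2, 0, -1]) == 0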
| 370
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop('''pixel_values''')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['''overflow_to_sample_mapping'''])
        encoded_inputs['''image'''] = images
        return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
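# Minimal usage sketch (illustrative; checkpoint and file names are placeholders,
# not part of the original file): the processor runs OCR through the image
# processor, then tokenizes the recognized words with their boxes.
# from transformers import LayoutXLMProcessor
# from PIL import Image
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# list(encoding.keys())  # includes "input_ids", "bbox", "attention_mask", "image"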
| 6
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 21
|
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if either input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Sanity-check the full truth table of the OR gate."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 204
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
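# Effect sketch (illustrative): with _LazyModule installed in sys.modules, the heavy
# modeling file is only imported when one of its symbols is first accessed, e.g.:
# from transformers import XLMRobertaXLModel  # the real import happens here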
| 247
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 247
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
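# As the "New Code" comment in the eval loop notes, the manual last-batch truncation
# can be replaced by a single call; a hedged sketch of the equivalent loop body:
# predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
# metric.add_batch(predictions=predictions, references=references)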
| 113
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowercase_ = parser.parse_args()
lowercase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
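# Example invocation (the output path is a placeholder; flags match the argparse setup above):
# python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted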
| 58
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 94
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
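# Minimal usage sketch (illustrative, not part of the original file): the
# attribute_map plus the properties above let `hidden_size` alias `d_model`.
# config = ConditionalDetrConfig(num_queries=100)
# assert config.hidden_size == config.d_model == 256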
| 73
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 223
| 0
|
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler #6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
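# Quick sanity check (well-known Project Euler #6 value): solution(10) == 2640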
if __name__ == "__main__":
print(F'''{solution() = }''')
| 110
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
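# Minimal usage sketch (the checkpoint id is a placeholder assumption):
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
# image = pipe(num_inference_steps=2000).images[0]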
| 105
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 146
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 369
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 27
| 0
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : Any=None ) -> Optional[int]:
'''simple docstring'''
if config_path is not None:
__lowerCAmelCase = BlipConfig.from_pretrained(lowerCAmelCase__ )
else:
__lowerCAmelCase = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
__lowerCAmelCase = BlipForConditionalGeneration(lowerCAmelCase__ ).eval()
__lowerCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
__lowerCAmelCase = blip_decoder(pretrained=lowerCAmelCase__ , image_size=3_84 , vit="""base""" )
__lowerCAmelCase = pt_model.eval()
__lowerCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
__lowerCAmelCase = modified_state_dict.pop(lowerCAmelCase__ )
__lowerCAmelCase = rename_key(lowerCAmelCase__ )
__lowerCAmelCase = value
hf_model.load_state_dict(lowerCAmelCase__ )
__lowerCAmelCase = 3_84
__lowerCAmelCase = load_demo_image(image_size=lowerCAmelCase__ , device="""cpu""" )
__lowerCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCAmelCase = tokenizer(["""a picture of"""] ).input_ids
__lowerCAmelCase = hf_model.generate(lowerCAmelCase__ , lowerCAmelCase__ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
__lowerCAmelCase = hf_model.generate(lowerCAmelCase__ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCAmelCase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowerCAmelCase = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
__lowerCAmelCase = blip_vqa(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit="""base""" )
vqa_model.eval()
__lowerCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowerCAmelCase = modified_state_dict.pop(lowerCAmelCase__ )
__lowerCAmelCase = rename_key(lowerCAmelCase__ )
__lowerCAmelCase = value
__lowerCAmelCase = BlipForQuestionAnswering(lowerCAmelCase__ )
hf_vqa_model.load_state_dict(lowerCAmelCase__ )
__lowerCAmelCase = ['''How many dogs are in this image?''']
__lowerCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="""pt""" ).input_ids
__lowerCAmelCase = hf_vqa_model.generate(lowerCAmelCase__ , lowerCAmelCase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
__lowerCAmelCase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
__lowerCAmelCase = blip_itm(pretrained=lowerCAmelCase__ , image_size=lowerCAmelCase__ , vit="""base""" )
itm_model.eval()
__lowerCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
__lowerCAmelCase = modified_state_dict.pop(lowerCAmelCase__ )
__lowerCAmelCase = rename_key(lowerCAmelCase__ )
__lowerCAmelCase = value
__lowerCAmelCase = BlipForImageTextRetrieval(lowerCAmelCase__ )
__lowerCAmelCase = ['''A picture of a woman with a dog sitting in a beach''']
__lowerCAmelCase = tokenizer(
lowerCAmelCase__ , return_tensors="""pt""" , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCAmelCase__ )
hf_itm_model.eval()
__lowerCAmelCase = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ )
__lowerCAmelCase = hf_itm_model(lowerCAmelCase__ , lowerCAmelCase__ , use_itm_head=lowerCAmelCase__ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
_A : Optional[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
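# A hedged sketch of invoking the converter above from the shell, assuming it is
# saved as convert_blip_original_pytorch_to_hf.py (the BLIP repo must be cloned
# alongside it so the `models.blip*` imports resolve):
#
#   git clone https://github.com/salesforce/BLIP.git
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-converted \
#       --config_path ./blip_config.json  # optional; defaults to BlipConfig()
#
# On success it writes three checkpoints: ./blip-converted (captioning),
# ./blip-converted_vqa and ./blip-converted_itm.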
| 229
|
def __UpperCamelCase ( lowerCAmelCase__ : int = 5_0_0_0_0_0_0_0 ):
__a : int = set()
__a : str = int((limit - 2_4) ** (1 / 2) )
__a : int = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
for primea in primes:
__a : Union[str, Any] = primea * primea
for primea in primes:
__a : Union[str, Any] = primea * primea * primea
if square + cube >= limit - 1_6:
break
for primea in primes:
__a : int = primea * primea * primea * primea
__a : Union[str, Any] = square + cube + tetr
if total >= limit:
break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 216
| 0
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
a :Dict = logging.getLogger(__name__)
a :Optional[int] = "pytorch_model.bin"
@dataclasses.dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""})
_SCREAMING_SNAKE_CASE :Optional[str] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""})
_SCREAMING_SNAKE_CASE :str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""})
_SCREAMING_SNAKE_CASE :Optional[str] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """A csv or a json file containing the validation data."""})
_SCREAMING_SNAKE_CASE :Optional[str] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """The name of the task to train on."""} , )
_SCREAMING_SNAKE_CASE :Optional[List[str]] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """The list of labels for the task."""})
@dataclasses.dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""})
_SCREAMING_SNAKE_CASE :Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""})
_SCREAMING_SNAKE_CASE :Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
_SCREAMING_SNAKE_CASE :Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_SCREAMING_SNAKE_CASE :Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
_SCREAMING_SNAKE_CASE :Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
    _SCREAMING_SNAKE_CASE :Optional[int] = dataclasses.field(
        default=1_00 , metadata={"""help""": """Maximum number of self-training iterations."""} , )
_SCREAMING_SNAKE_CASE :Optional[int] = dataclasses.field(
default=UpperCamelCase_ , metadata={"""help""": """Random seed for initialization."""} , )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dataset.filter(lambda __lowerCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(eval_result * len(__lowerCAmelCase ) )
print(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset.sort("""probability""" , reverse=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = dataset.select(range(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[str] = dataset.remove_columns(["""label""", """probability"""] )
SCREAMING_SNAKE_CASE__ : List[Any] = dataset.rename_column("""prediction""" , """label""" )
SCREAMING_SNAKE_CASE__ : int = dataset.map(lambda __lowerCAmelCase : {"label": idalabel[example["label"]]} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dataset.shuffle(seed=args.seed )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(__lowerCAmelCase , index=__lowerCAmelCase )
else:
dataset.to_json(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = STModelArguments(model_name_or_path=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = STDataArguments(train_file=__lowerCAmelCase , infer_file=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = STTrainingArguments(output_dir=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowerCAmelCase ).items():
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for key, value in kwargs.items():
if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Sanity checks
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.train_file
SCREAMING_SNAKE_CASE__ : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
SCREAMING_SNAKE_CASE__ : Optional[Any] = args.eval_file
for key in data_files:
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
SCREAMING_SNAKE_CASE__ : str = extension
else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
SCREAMING_SNAKE_CASE__ : List[Any] = F'''{args.output_dir}/self-train_iter-{{}}'''.format
SCREAMING_SNAKE_CASE__ : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Dict = False
# Show the progress bar
SCREAMING_SNAKE_CASE__ : int = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = data_dir_format(__lowerCAmelCase )
assert os.path.exists(__lowerCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """stage-1""" )
SCREAMING_SNAKE_CASE__ : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowerCAmelCase , __lowerCAmelCase ):
arguments_dict.update({key: value} )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __lowerCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(__lowerCAmelCase , """best-checkpoint""" )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """stage-2""" )
# Update arguments_dict
SCREAMING_SNAKE_CASE__ : Optional[int] = model_path
SCREAMING_SNAKE_CASE__ : str = data_files["""train"""]
SCREAMING_SNAKE_CASE__ : Tuple = current_output_dir
SCREAMING_SNAKE_CASE__ : str = os.path.join(__lowerCAmelCase , """best-checkpoint""" , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = iteration
SCREAMING_SNAKE_CASE__ : Tuple = data_dir_format(iteration + 1 )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , """best-checkpoint""" ) )
SCREAMING_SNAKE_CASE__ : Dict = config.idalabel
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """eval_results_best-checkpoint.json""" )
SCREAMING_SNAKE_CASE__ : str = os.path.join(__lowerCAmelCase , """test_results_best-checkpoint.json""" )
assert os.path.exists(__lowerCAmelCase )
with open(__lowerCAmelCase , """r""" ) as f:
SCREAMING_SNAKE_CASE__ : int = float(json.load(__lowerCAmelCase )[args.eval_metric] )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(__lowerCAmelCase )
# Loading the dataset from local csv or json files.
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
SCREAMING_SNAKE_CASE__ : Dict = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(__lowerCAmelCase ):
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(__lowerCAmelCase , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
SCREAMING_SNAKE_CASE__ : Optional[Any] = eval_result
if best_iteration is None:
SCREAMING_SNAKE_CASE__ : int = new_iteration
SCREAMING_SNAKE_CASE__ : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_iteration
SCREAMING_SNAKE_CASE__ : Dict = new_eval_result
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
SCREAMING_SNAKE_CASE__ : Any = new_iteration
SCREAMING_SNAKE_CASE__ : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
SCREAMING_SNAKE_CASE__ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , __lowerCAmelCase )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__lowerCAmelCase , """eval_results_best-iteration.json""" ) , )
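# A hedged sketch of driving the self-training loop above (the entry point's
# real name is obfuscated; in the upstream research example it is `selftrain`).
# All paths and hyper-parameters below are placeholders, not values from this file.
selftrain(
    "bert-base-uncased",      # model_name_or_path
    "data/train.csv",         # train_file: labeled seed data
    "data/unlabeled.csv",     # infer_file: data to pseudo-label
    "outputs/self-training",  # output_dir
    eval_file="data/eval.csv",
    evaluation_strategy="steps",
    max_selftrain_iterations=3,
    seed=42,
)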
| 56
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a :Dict = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Optional[int] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :List[str] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
a :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
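# A short usage sketch for the lazily exported RAG classes (standard
# Transformers API; `use_dummy_dataset=True` avoids downloading the full
# wiki index and keeps this illustrative):
from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
model = RagSequenceForGeneration.from_pretrained(
    "facebook/rag-sequence-nq", retriever=retriever
)
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))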
| 56
| 1
|
"""simple docstring"""
def _lowercase ( __snake_case ) -> bool:
__lowerCAmelCase : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCAmelCase : set[int] = set()
return any(
node not in visited and depth_first_search(__snake_case ,__snake_case ,__snake_case ,__snake_case )
for node in graph )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> bool:
visited.add(__snake_case )
rec_stk.add(__snake_case )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__snake_case ,__snake_case ,__snake_case ,__snake_case ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__snake_case )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
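# A hedged usage sketch (the public entry point is obfuscated above; in the
# original algorithms module it is `check_cycle(graph)`, where `graph` maps each
# vertex to its list of outgoing neighbours):
acyclic = {0: [1, 2], 1: [2], 2: []}
cyclic = {0: [1], 1: [2], 2: [0]}  # 0 -> 1 -> 2 -> 0 is a back edge
assert check_cycle(acyclic) is False
assert check_cycle(cyclic) is True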
| 269
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class A__ :
'''simple docstring'''
def __init__( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = psutil.Process()
__lowerCAmelCase : str = False
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = -1
while True:
__lowerCAmelCase : str = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : str = threading.Thread(target=self.peak_monitor)
__lowerCAmelCase : Tuple = True
self.thread.start()
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = False
self.thread.join()
return self.cpu_memory_peak
__snake_case : Tuple = PeakCPUMemory()
def _lowercase ( ) -> str:
# Time
__lowerCAmelCase : str = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : Optional[Any] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = torch.cuda.memory_allocated(__snake_case )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowercase ( __snake_case ) -> Optional[Any]:
# Time
__lowerCAmelCase : str = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase : str = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase : List[str] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase : Union[str, Any] = (torch.cuda.memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
__lowerCAmelCase : Any = (torch.cuda.max_memory_allocated(__snake_case ) - start_measures[str(__snake_case )]) / 2**20
return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 269
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> Tuple:
"""simple docstring"""
lowercase_ : Any = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = StableDiffusionLatentUpscalePipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ = frozenset([])
UpperCamelCase__ = True
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : List[str] = 1
lowercase_ : Union[str, Any] = 4
lowercase_ : Optional[Any] = (16, 16)
lowercase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase_ )
return image
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
torch.manual_seed(0 )
lowercase_ : int = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=lowercase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=lowercase_ , only_cross_attention=lowercase_ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
lowercase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
lowercase_ : int = EulerDiscreteScheduler(prediction_type="""sample""" )
lowercase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , )
lowercase_ : int = CLIPTextModel(lowercase_ )
lowercase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : List[str] = {
"""unet""": model.eval(),
"""vae""": vae.eval(),
"""scheduler""": scheduler,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[Any]=0 ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : Optional[int] = torch.manual_seed(lowercase_ )
else:
lowercase_ : str = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": self.dummy_image.cpu(),
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[Any] = """cpu"""
lowercase_ : Any = self.get_dummy_components()
lowercase_ : str = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[int] = self.get_dummy_inputs(lowercase_ )
lowercase_ : List[Any] = pipe(**lowercase_ ).images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
lowercase_ : Any = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
lowercase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_ , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
super().test_save_load_local(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : int = [
"""DDIMScheduler""",
"""DDPMScheduler""",
"""PNDMScheduler""",
"""HeunDiscreteScheduler""",
"""EulerAncestralDiscreteScheduler""",
"""KDPM2DiscreteScheduler""",
"""KDPM2AncestralDiscreteScheduler""",
"""DPMSolverSDEScheduler""",
]
lowercase_ : Union[str, Any] = self.get_dummy_components()
lowercase_ : Dict = self.pipeline_class(**lowercase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Dict = self.get_dummy_inputs(lowercase_ )
lowercase_ : Optional[int] = 2
lowercase_ : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers that do not use sigmas are not supported here, so skip them
continue
lowercase_ : Union[str, Any] = getattr(lowercase_ , scheduler_enum.name )
lowercase_ : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
lowercase_ : Union[str, Any] = pipe(**lowercase_ )[0]
outputs.append(lowercase_ )
assert check_same_shape(lowercase_ )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = torch.manual_seed(33 )
lowercase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowercase_ : Any = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
lowercase_ : List[Any] = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
lowercase_ : int = pipe(lowercase_ , generator=lowercase_ , output_type="""latent""" ).images
lowercase_ : Optional[int] = upscaler(
prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0]
lowercase_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = torch.manual_seed(33 )
lowercase_ : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
lowercase_ : Union[str, Any] = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
lowercase_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
lowercase_ : Dict = upscaler(
prompt=lowercase_ , image=lowercase_ , num_inference_steps=20 , guidance_scale=0 , generator=lowercase_ , output_type="""np""" , ).images[0]
lowercase_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-2
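# A condensed, de-obfuscated sketch of the two-stage flow the slow tests above
# exercise: generate latents with a base pipeline, then upscale them 2x with the
# latent upscaler. Requires a CUDA GPU; the model ids come from the tests.
import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
generator = torch.manual_seed(33)
low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
upscaled = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
upscaled.save("astronaut_1024.png")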
| 365
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __magic_name__ ( ctypes.Structure):
# _fields is a specific attr expected by ctypes
UpperCamelCase__ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def lowerCamelCase ( ) -> List[Any]:
if os.name == "nt":
lowercase_ : List[Any] = CursorInfo()
lowercase_ : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
lowercase_ : List[str] = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def lowerCamelCase ( ) -> str:
if os.name == "nt":
lowercase_ : int = CursorInfo()
lowercase_ : Optional[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
lowercase_ : Optional[int] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCAmelCase__ , ctypes.byref(UpperCAmelCase__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def lowerCamelCase ( ) -> Any:
try:
hide_cursor()
yield
finally:
show_cursor()
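# A hedged usage sketch (the context manager's name is obfuscated above; in
# accelerate's menu utilities it is `hide`). The cursor stays hidden for the
# duration of the block and is restored even if the body raises:
import time

with hide():
    for _ in range(3):
        time.sleep(0.1)  # placeholder "work" with the cursor hidden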
| 21
| 0
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = prime_factors(lowerCamelCase_ )
if is_square_free(lowerCamelCase_ ):
return -1 if len(lowerCamelCase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
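# A quick illustration of the Möbius function above (hedged: the obfuscated
# entry point corresponds to `mobius(n)` in the original module):
#   mobius(7)  == -1  (a prime: odd number of prime factors)
#   mobius(10) ==  1  (10 = 2 * 5: square-free, even number of factors)
#   mobius(24) ==  0  (24 = 2**3 * 3 is not square-free)
print([mobius(n) for n in (7, 10, 24)])  # -> [-1, 1, 0]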
| 21
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
A : str = logging.get_logger(__name__)
class __A( a ):
def __init__( self , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''bs4'''] )
super().__init__(**_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = []
__a = []
__a = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__a = parent.find_all(child.name , recursive=_snake_case )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_snake_case ) else next(i for i, s in enumerate(_snake_case , 1 ) if s is child ) )
__a = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = BeautifulSoup(_snake_case , '''html.parser''' )
__a = []
__a = []
__a = []
for element in html_code.descendants:
if type(_snake_case ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__a = html.unescape(_snake_case ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_snake_case )
__a , __a = self.xpath_soup(_snake_case )
stringaxtag_seq.append(_snake_case )
stringaxsubs_seq.append(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_snake_case ) != len(_snake_case ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = ''''''
for tagname, subs in zip(_snake_case , _snake_case ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , _snake_case ) -> BatchFeature:
'''simple docstring'''
__a = False
# Check that strings has a valid type
if isinstance(_snake_case , _snake_case ):
__a = True
elif isinstance(_snake_case , (list, tuple) ):
if len(_snake_case ) == 0 or isinstance(html_strings[0] , _snake_case ):
__a = True
if not valid_strings:
raise ValueError(
                '''HTML strings must be of type `str` or `List[str]` (batch of examples), '''
                F"""but are of type {type(_snake_case )}.""" )
__a = bool(isinstance(_snake_case , (list, tuple) ) and (isinstance(html_strings[0] , _snake_case )) )
if not is_batched:
__a = [html_strings]
# Get nodes + xpaths
__a = []
__a = []
for html_string in html_strings:
__a , __a , __a = self.get_three_from_single(_snake_case )
nodes.append(_snake_case )
__a = []
for node, tag_list, sub_list in zip(_snake_case , _snake_case , _snake_case ):
__a = self.construct_xpath(_snake_case , _snake_case )
xpath_strings.append(_snake_case )
xpaths.append(_snake_case )
# return as Dict
__a = {'''nodes''': nodes, '''xpaths''': xpaths}
__a = BatchFeature(data=_snake_case , tensor_type=_snake_case )
return encoded_inputs
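# A hedged usage sketch (the class name is obfuscated above; it matches the
# behaviour of Transformers' MarkupLMFeatureExtractor, which maps raw HTML to
# its text nodes plus their XPath expressions):
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # [['Title', 'Hello world']]
print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]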
| 6
| 0
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__snake_case : int = logging.get_logger(__name__)
__snake_case : Optional[int] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
__snake_case : List[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__snake_case : Optional[Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
__snake_case : str = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
__snake_case : List[str] = OrderedDict(
[
        # Model for Image classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
__snake_case : Any = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
__snake_case : Dict = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
__snake_case : Union[str, Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
__snake_case : Optional[Any] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
__snake_case : Optional[Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
__snake_case : int = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
__snake_case : List[Any] = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
__snake_case : Optional[int] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
__snake_case : Dict = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
__snake_case : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__snake_case : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__snake_case : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__snake_case : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__snake_case : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__snake_case : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__snake_case : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__snake_case : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__snake_case : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__snake_case : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING
__snake_case : Any = auto_class_update(FlaxAutoModel)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__snake_case : Tuple = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__snake_case : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__snake_case : Tuple = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__snake_case : Dict = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__snake_case : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__snake_case : List[str] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__snake_case : Union[str, Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__snake_case : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__snake_case : Optional[int] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__snake_case : Any = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A__ ( _BaseAutoModelClass ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__snake_case : Optional[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 58
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __lt__( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Dict:
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self: int , _SCREAMING_SNAKE_CASE: Any) -> Tuple:
"""simple docstring"""
return self[-1] == other[-1]
def _lowercase ( __snake_case ) -> list:
__lowerCAmelCase : list[Stack] = []
# sort into stacks
for element in collection:
__lowerCAmelCase : Dict = Stack([element] )
__lowerCAmelCase : str = bisect_left(__snake_case ,__snake_case )
if i != len(__snake_case ):
stacks[i].append(__snake_case )
else:
stacks.append(__snake_case )
# use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Optional[int] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
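# A hedged usage sketch (the entry point is obfuscated above; in the original
# module it is `patience_sort`, named after the patience card game: deal the
# elements onto sorted piles, then heap-merge the piles):
assert patience_sort([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]
assert patience_sort([]) == []
assert patience_sort([-3, -17, -48]) == [-48, -17, -3]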
| 58
| 1
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A__ = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowercase_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
assert _test_patching.open is open
A__ = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , lowercase_ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _SCREAMING_SNAKE_CASE ( ) -> int:
# pandas.read_csv is not present in _test_patching
A__ = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , lowercase_ ):
pass
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
    # builtins should always be mocked even if they're not in the globals
# in case they're loaded at one point
A__ = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , lowercase_ ) is None
with patch_submodule(_test_patching , "len" , lowercase_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
A__ = "__test_patch_submodule_start_and_stop_mock__"
A__ = patch_submodule(_test_patching , "open" , lowercase_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowercase_ ):
with patch_submodule(_test_patching , "os.rename" , lowercase_ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowercase_ ):
with patch_submodule(_test_patching , "os.path.join" , lowercase_ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowercase_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
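

# For reference, a minimal sketch of the kind of context manager these tests
# exercise. This is a hypothetical simplification: the real `patch_submodule`
# helper also resolves dotted targets like "os.path.join" and wraps the
# intermediate modules in _PatchedModuleObj so that every access route
# (os.path.join, path.join, join, and their renamed aliases) sees the mock.
import contextlib


@contextlib.contextmanager
def simple_patch(module, attribute, mock):
    """Temporarily replace `module.attribute` with `mock`, then restore it."""
    sentinel = object()
    original = getattr(module, attribute, sentinel)
    setattr(module, attribute, mock)
    try:
        yield
    finally:
        if original is sentinel:
            delattr(module, attribute)
        else:
            setattr(module, attribute, original)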
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
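

# A quick usage sketch (hypothetical values): it behaves like any other
# PretrainedConfig subclass, so unspecified fields keep their defaults.
# config = MegatronBertConfig(vocab_size=29_056, num_hidden_layers=12)
# assert config.model_type == "megatron-bert"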
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
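

# A small worked example (values chosen for illustration): the cheapest
# top-left to bottom-right path below is 1 -> 3 -> 1 -> 1 -> 1 = 7.
# >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
# 7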
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
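

# Usage sketch (hypothetical values): __init__ above rewrites the legacy
# "gated-gelu" spelling to the "gelu_new" activation for backwards
# compatibility.
# config = T5Config(feed_forward_proj="gated-gelu")
# assert config.dense_act_fn == "gelu_new" and config.is_gated_act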
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
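

# A small worked example: the recursion splits the list in half and keeps the
# larger of the two sub-maxima at every merge step.
# >>> find_max([2, 8, 5, 3], 0, 3)
# 8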
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    # sieve the odd numbers, then add 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * product over prime divisors p of (1 - 1/p)
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
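

# A quick sanity check: phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.
# >>> solution(8)
# 21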
if __name__ == "__main__":
print(F"{solution() = }")
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        # replicate the params across devices and shard the per-device inputs
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png'
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # two-color every connected component
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # bipartite iff no edge joins two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
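
# An odd cycle can never be two-colored, so a triangle is not bipartite:
graph_triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(graph_triangle))  # False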
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
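

# A quick sanity check under the convention used above
# (rate_1 / rate_2 = sqrt(M_2 / M_1)): a gas with a quarter of the molar mass
# effuses twice as fast.
# >>> effusion_ratio(4.0, 16.0)
# 2.0
# >>> validate(4.0, -1.0)
# False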
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # move to the next key letter, wrapping around at the end
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-letters pass through unchanged
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes."""
    # 0 marks "still assumed prime"; 1 marks "known composite" (or 0 and 1)
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
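

# A quick check against the small case: the primes below 10 are 2, 3, 5 and 7,
# and their sum is 17.
# >>> solution(10)
# 17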
if __name__ == "__main__":
print(f"{solution() = }")
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string is disqualified by two total absences or
    # three consecutive late days
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
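

# The problem's small case: over a 4-day period, 43 of the 81 possible
# attendance strings would lead to a prize.
# >>> solution(4)
# 43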
if __name__ == "__main__":
print(solution())