| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
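Each row pairs a `code` snippet with a `style_context` snippet, tags each with an integer style id, and carries a binary `label`; in the rows below, `label` is 1 exactly when the two style ids match. As a minimal sketch of how a row might be consumed with the `datasets` library (the dataset path is a placeholder, not the real identifier):

from datasets import load_dataset

# Hypothetical dataset path -- substitute the actual identifier.
ds = load_dataset("your-org/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"])  # integer style ids
print(row["label"])  # 1 when the two snippets share a style id, 0 otherwise (as seen in the rows below)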
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex phasor) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Apparent power is the product of the two phasors
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None

class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None

class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 279
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst` by divide and conquer.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing, recurse on left
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}

class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 223
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix=""):
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)

@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)

@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        text = "Hey!"
        agent_type = AgentText(text)

        self.assertEqual(text, agent_type.to_string())
        self.assertEqual(text, agent_type.to_raw())
        self.assertEqual(text, agent_type)
| 60
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    # Disable gradient computation for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False

def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device

def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()

def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 60
| 1
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Fractional knapsack: maximise value for capacity `w`, given `n` items
    with values `vl` and weights `wt`.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value-to-weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 188
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 188
| 1
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")

class LRUCache(Generic[T]):
    """Page replacement algorithm: Least Recently Used (LRU) caching."""

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store and key set; the cache is sized to n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all elements in the store, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 33
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 11
| 0
|
'''simple docstring'''
import socket
def main():
    """Receive a file from the server over a TCP socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
| 275
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 275
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_pix2struct': [
        'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Pix2StructConfig',
        'Pix2StructTextConfig',
        'Pix2StructVisionConfig',
    ],
    'processing_pix2struct': ['Pix2StructProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_pix2struct'] = [
        'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Pix2StructPreTrainedModel',
        'Pix2StructForConditionalGeneration',
        'Pix2StructVisionModel',
        'Pix2StructTextModel',
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None

def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)
    return wrapper

def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)
    return wrapper

def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)
    return wrapper

def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished

@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        _ = "[...]"  # placeholder kept from the original dump
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"  # placeholder kept from the original dump
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper

@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield

@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf") as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield

@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield

def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 250
| 1
|
'''simple docstring'''
def sylvester(number: int) -> int:
    """
    Return the `number`-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ...
    Each term is the product of all previous terms plus one,
    i.e. a(n) = a(n-1) * (a(n-1) - 1) + 1 with a(1) = 2.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 350
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 136
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors='np')
        input_processor = processor(audio=audio, return_tensors='np')

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors='np')
        input_processor = processor(images=images, return_tensors='np')

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg='`processor` and `image_processor`+`feature_extractor` model input names do not match', )
| 331
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer

class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 0
|
def solution(min_total: int = 10**12) -> int:
    """
    Return the number of blue discs for the first arrangement
    with more than min_total discs in total.

    >>> solution(2)
    3
    >>> solution(100)
    85
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
| 269
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift (RMSNorm);
        # the variance is accumulated in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """GELU activation using the tanh approximation from the GPT/BERT papers."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: predicts a per-channel scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 269
| 1
|
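The decoder above conditions every block with FiLM (feature-wise linear modulation): a conditioning embedding predicts per-channel scale and shift, applied as x * (1 + scale) + shift. A standalone sketch of just the mechanism:

import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, cond_dim: int, num_features: int):
        super().__init__()
        self.proj = nn.Linear(cond_dim, num_features * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.proj(cond), 2, dim=-1)
        return x * (1 + scale) + shift

film = FiLM(cond_dim=32, num_features=8)
x = torch.randn(4, 10, 8)     # (batch, seq, features)
cond = torch.randn(4, 1, 32)  # broadcast over the sequence axis
print(film(x, cond).shape)    # torch.Size([4, 10, 8])
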
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Recursively sorts the first n elements of `collection` in place.
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Bubbles collection[index - 1] rightwards until order is restored.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 172
|
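For comparison, the recursion above unrolls to the classic iterative insertion sort, still O(n^2) in the worst case but free of Python's recursion-depth limit:

def insertion_sort_iterative(collection: list) -> list:
    # Same swap-adjacent strategy as insert_next, driven by a loop.
    for i in range(1, len(collection)):
        j = i
        while j > 0 and collection[j - 1] > collection[j]:
            collection[j - 1], collection[j] = collection[j], collection[j - 1]
            j -= 1
    return collection

assert insertion_sort_iterative([5, 2, 4, 1]) == [1, 2, 4, 5]
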
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 172
| 1
|
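The hidden-state assertions in the test above encode SwiftFormer's feature pyramid: stage i has embed_dims[i // 2] channels at spatial size (image_size // 4) // 2 ** (i // 2). A quick script to print the expected shapes for the tester's defaults:

image_size, embed_dims = 224, [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(i, (embed_dims[i // 2], side, side))
# stages 0/1 -> (48, 56, 56), 2/3 -> (56, 28, 28), 4/5 -> (112, 14, 14), 6/7 -> (220, 7, 7)
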
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style. No bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift (RMSNorm);
        # the variance is accumulated in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """GELU activation using the tanh approximation from the GPT/BERT papers."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """FiLM layer: predicts a per-channel scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 318
|
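T5LayerNorm in the module above is an RMSNorm: scale-only, no mean subtraction or bias, with the variance computed in float32 for stability. A compact standalone equivalent:

import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # root-mean-square normalization over the last axis, then a learned scale
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(weight.dtype)

x = torch.randn(2, 5, 8)
print(rms_norm(x, torch.ones(8)).shape)  # torch.Size([2, 5, 8])
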
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per HumanEval task and strip trailing blocks."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 318
| 1
|
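The code_eval metric used above reports pass@k with the unbiased estimator from the Codex paper, 1 - C(n-c, k)/C(n, k) for n samples of which c pass the tests. A minimal sketch of the arithmetic:

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # probability that at least one of k drawn samples is correct
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

print(pass_at_k(n=20, c=5, k=1))  # 0.25
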
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` expressible as a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(F"{solution() = }")
| 96
|
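The candidates enumerated above are differences of consecutive cubes, (n+1)^3 - n^3 = 3n^2 + 3n + 1; the `prime_candidate += 6 * cube_index` step is just the telescoped gap between successive candidates:

candidates = [(n + 1) ** 3 - n ** 3 for n in range(1, 6)]
print(candidates)  # [7, 19, 37, 61, 91]
gaps = [b - a for a, b in zip(candidates, candidates[1:])]
print(gaps)        # [12, 18, 24, 30] == 6 * cube_index for cube_index = 2, 3, 4, 5
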
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 17
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original model's weights to our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 365
|
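read_in_q_k_v above splits timm's fused qkv projection, a (3 * hidden, hidden) matrix, into the separate query/key/value slices the HF layout expects. A toy-sized check of the slicing:

import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
assert torch.equal(torch.cat([q, k, v]), qkv)  # the three slices tile the fused matrix
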
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak the s3prl checkpoint's weights to the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 234
| 0
|
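The converters above graft task-head weights by assigning to `.data` on the target parameters, which replaces the tensor contents without going through autograd. The pattern on a toy module:

import torch
from torch import nn

head = nn.Linear(4, 2)
downstream = {"linear.weight": torch.zeros(2, 4), "linear.bias": torch.ones(2)}
head.weight.data = downstream["linear.weight"]
head.bias.data = downstream["linear.bias"]
assert torch.equal(head.bias, torch.ones(2))  # parameters now hold the copied values
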
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Dict = ["""pixel_values"""]
def __init__( self : List[str] , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = True , **__lowerCamelCase : Optional[int] , ):
super().__init__(**__lowerCamelCase )
UpperCamelCase :Tuple = size if size is not None else {"""shortest_edge""": 224}
UpperCamelCase :Any = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
UpperCamelCase :Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCamelCase :int = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase , param_name="""crop_size""" )
UpperCamelCase :Tuple = do_resize
UpperCamelCase :Optional[int] = size
UpperCamelCase :List[Any] = resample
UpperCamelCase :Union[str, Any] = do_center_crop
UpperCamelCase :Tuple = crop_size
UpperCamelCase :Union[str, Any] = do_rescale
UpperCamelCase :Dict = rescale_factor
UpperCamelCase :Optional[Any] = do_normalize
UpperCamelCase :Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase :str = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase :str = do_convert_rgb
def _A ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ):
UpperCamelCase :Tuple = get_size_dict(__lowerCamelCase , default_to_square=__lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCamelCase :int = get_resize_output_image_size(__lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCamelCase )
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Tuple , ):
UpperCamelCase :str = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Optional[int] , ):
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : str , ):
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ):
UpperCamelCase :Dict = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :List[str] = size if size is not None else self.size
UpperCamelCase :str = get_size_dict(__lowerCamelCase , param_name="""size""" , default_to_square=__lowerCamelCase )
UpperCamelCase :Optional[Any] = resample if resample is not None else self.resample
UpperCamelCase :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase :List[str] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase :str = get_size_dict(__lowerCamelCase , param_name="""crop_size""" , default_to_square=__lowerCamelCase )
UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase :int = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase :List[str] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase :Optional[Any] = image_std if image_std is not None else self.image_std
UpperCamelCase :List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase :Dict = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase :Optional[int] = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase :Dict = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase :Tuple = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_center_crop:
UpperCamelCase :str = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase :List[str] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase :str = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
UpperCamelCase :Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase :str = {"""pixel_values""": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
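# Illustrative sketch (not part of the original file): preprocess() above applies
# resize -> center_crop -> rescale -> normalize in that order. The numpy-only demo below
# reproduces the last two steps; the image shape and the CLIP mean/std constants are
# assumptions restated here just for the example.
import numpy as np

demo_image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
rescaled = demo_image * (1 / 255)  # do_rescale with rescale_factor = 1/255
clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])  # OPENAI_CLIP_MEAN
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])  # OPENAI_CLIP_STD
normalized = (rescaled - clip_mean) / clip_std  # do_normalize
assert normalized.shape == (224, 224, 3)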
| 38
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowercase ( ) -> List[str]:
__SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' )
return image
def __lowercase ( a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __lowercase ( a__ , a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = dct.pop(a__ )
__SCREAMING_SNAKE_CASE = val
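# Illustrative sketch (not part of the original file): rename_key() above simply moves a
# tensor from the old checkpoint key to the new Hugging Face key in place, e.g.:
demo_state_dict = {"visual_encoder.cls_token": 1}
demo_state_dict["vision_model.embeddings.class_embedding"] = demo_state_dict.pop("visual_encoder.cls_token")
assert demo_state_dict == {"vision_model.embeddings.class_embedding": 1}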
def __lowercase ( a__ , a__ ) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(a__ , requires_grad=a__ ), v_bias) )
__SCREAMING_SNAKE_CASE = qkv_bias
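# Illustrative sketch (not part of the original file): the helper above rebuilds the fused qkv
# bias from the separate q and v biases, padding zeros for the bias-free key projection.
# The hidden size below is an arbitrary demo assumption.
import torch

hidden_size = 8
q_bias, v_bias = torch.randn(hidden_size), torch.randn(hidden_size)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)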
def __lowercase ( a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = 3_64 if 'coco' in model_name else 2_24
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=a__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=a__ ).to_dict()
elif "opt-6.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=a__ ).to_dict()
elif "t5-xl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=a__ , text_config=a__ )
return config, image_size
@torch.no_grad()
def __lowercase ( a__ , a__=None , a__=False ) -> Any:
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__SCREAMING_SNAKE_CASE = tokenizer('\n' , add_special_tokens=a__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(a__ , eos_token_id=a__ )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(a__ ).eval()
__SCREAMING_SNAKE_CASE = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print('Done!' )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
    # rename the remaining keys based on simple string patterns
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
if key.startswith('Qformer.bert' ):
__SCREAMING_SNAKE_CASE = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace('self' , 'attention' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__SCREAMING_SNAKE_CASE = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__SCREAMING_SNAKE_CASE = key.replace('t5' , 'language' )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(a__ , strict=a__ )
assert len(a__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors['eval'](a__ ).unsqueeze(0 ).to(a__ )
__SCREAMING_SNAKE_CASE = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(a__ )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=a__ , image_std=a__ )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ )
# make sure processor creates exact same pixel values
assert torch.allclose(a__ , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ , labels=a__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=a__ )
assert torch.allclose(logits[0, :3, :3] , a__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=a__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = tokenizer(a__ , return_tensors='pt' ).input_ids.to(a__ )
__SCREAMING_SNAKE_CASE = original_model.generate({'image': original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , a__ )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print('HF generation:' , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ : Dict =argparse.ArgumentParser()
lowerCAmelCase__ : Union[str, Any] =[
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
    parser.add_argument(
        '''--model_name''',
        default='''blip2-opt-2.7b''',
        choices=choices,
        type=str,
        help='''Name of the BLIP-2 checkpoint to convert (one of the choices above)''',
    )
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ : int =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
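# Example invocation (the script name and output path are assumptions, shown for illustration):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted
#
# Add --push_to_hub to upload the converted weights and processor once the checks pass.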
| 257
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[int] ) -> Optional[int]:
__magic_name__ : int = []
def __lowerCAmelCase ( self : Tuple , _A : str , _A : int , _A : Any , **_A : Optional[int] ) -> Optional[Any]:
self.events.append('on_init_end' )
def __lowerCAmelCase ( self : Any , _A : Tuple , _A : List[str] , _A : List[Any] , **_A : List[str] ) -> Any:
self.events.append('on_train_begin' )
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] , _A : str , **_A : Union[str, Any] ) -> List[Any]:
self.events.append('on_train_end' )
def __lowerCAmelCase ( self : Tuple , _A : Tuple , _A : Union[str, Any] , _A : Tuple , **_A : List[Any] ) -> Dict:
self.events.append('on_epoch_begin' )
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : int , _A : List[Any] , **_A : str ) -> int:
self.events.append('on_epoch_end' )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : Tuple , _A : int , **_A : Optional[Any] ) -> Any:
self.events.append('on_step_begin' )
def __lowerCAmelCase ( self : Optional[Any] , _A : int , _A : Dict , _A : Union[str, Any] , **_A : Union[str, Any] ) -> List[str]:
self.events.append('on_step_end' )
def __lowerCAmelCase ( self : Dict , _A : Any , _A : Optional[Any] , _A : Optional[Any] , **_A : List[str] ) -> Tuple:
self.events.append('on_evaluate' )
def __lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : Union[str, Any] , _A : Optional[int] , **_A : Union[str, Any] ) -> Tuple:
self.events.append('on_predict' )
def __lowerCAmelCase ( self : Dict , _A : List[str] , _A : Any , _A : Tuple , **_A : List[str] ) -> List[str]:
self.events.append('on_save' )
def __lowerCAmelCase ( self : Tuple , _A : Dict , _A : List[Any] , _A : Union[str, Any] , **_A : Optional[int] ) -> str:
self.events.append('on_log' )
def __lowerCAmelCase ( self : int , _A : Dict , _A : str , _A : List[Any] , **_A : int ) -> str:
self.events.append('on_prediction_step' )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Union[str, Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self : str ) -> Any:
shutil.rmtree(self.output_dir )
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[Any]=0 , _A : Optional[Any]=0 , _A : Tuple=64 , _A : Any=64 , _A : List[str]=None , _A : Dict=False , **_A : Optional[Any] ) -> Union[str, Any]:
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
__magic_name__ : Tuple = RegressionDataset(length=_A )
__magic_name__ : List[Any] = RegressionDataset(length=_A )
__magic_name__ : Dict = RegressionModelConfig(a=_A , b=_A )
__magic_name__ : Dict = RegressionPreTrainedModel(_A )
__magic_name__ : Optional[Any] = TrainingArguments(self.output_dir , disable_tqdm=_A , report_to=[] , **_A )
return Trainer(
_A , _A , train_dataset=_A , eval_dataset=_A , callbacks=_A , )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Any , _A : List[Any] ) -> Optional[int]:
self.assertEqual(len(_A ) , len(_A ) )
# Order doesn't matter
__magic_name__ : Dict = sorted(_A , key=lambda _A : cb.__name__ if isinstance(_A , _A ) else cb.__class__.__name__ )
__magic_name__ : Union[str, Any] = sorted(_A , key=lambda _A : cb.__name__ if isinstance(_A , _A ) else cb.__class__.__name__ )
for cba, cba in zip(_A , _A ):
if isinstance(_A , _A ) and isinstance(_A , _A ):
self.assertEqual(_A , _A )
elif isinstance(_A , _A ) and not isinstance(_A , _A ):
self.assertEqual(_A , cba.__class__ )
elif not isinstance(_A , _A ) and isinstance(_A , _A ):
self.assertEqual(cba.__class__ , _A )
else:
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Any , _A : Optional[Any] ) -> Dict:
__magic_name__ : List[str] = ['on_init_end', 'on_train_begin']
__magic_name__ : int = 0
__magic_name__ : Optional[Any] = len(trainer.get_eval_dataloader() )
__magic_name__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(_A ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : Any = self.get_trainer()
__magic_name__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# Callbacks passed at init are added to the default callbacks
__magic_name__ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__magic_name__ : Optional[int] = self.get_trainer(disable_tqdm=_A )
__magic_name__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
def __lowerCAmelCase ( self : List[Any] ) -> str:
__magic_name__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__magic_name__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_A )
expected_callbacks.remove(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
__magic_name__ : List[Any] = self.get_trainer()
__magic_name__ : Any = trainer.pop_callback(_A )
self.assertEqual(cb.__class__ , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
trainer.add_callback(_A )
expected_callbacks.insert(0 , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
# We can also add, pop, or remove by instance
__magic_name__ : str = self.get_trainer()
__magic_name__ : int = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_A )
expected_callbacks.remove(_A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
__magic_name__ : Dict = self.get_trainer()
__magic_name__ : Dict = trainer.callback_handler.callbacks[0]
__magic_name__ : Dict = trainer.pop_callback(_A )
self.assertEqual(_A , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
trainer.add_callback(_A )
expected_callbacks.insert(0 , _A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=_A )
__magic_name__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__magic_name__ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# Independent log/save/eval
__magic_name__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__magic_name__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
__magic_name__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__magic_name__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
__magic_name__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
__magic_name__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
__magic_name__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
__magic_name__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# A bit of everything
__magic_name__ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
__magic_name__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_A , self.get_expected_events(_A ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
__magic_name__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_A ) in warn_mock.call_args[0][0]
| 275
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :str = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[str] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[str] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
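# Illustrative sketch (not part of the original file): _LazyModule defers the heavy framework
# imports declared in _import_structure until an attribute is first accessed. A minimal
# standard-library stand-in for the same idea:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # import the defining submodule only on first attribute access
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)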
| 275
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCamelCase = datasets.utils.logging.get_logger(__name__)
lowerCamelCase = ['names', 'prefix']
lowerCamelCase = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowerCamelCase = ['encoding_errors', 'on_bad_lines']
lowerCamelCase = ['date_format']
@dataclass
class A ( datasets.BuilderConfig ):
UpperCamelCase__ : str =","
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : Optional[Union[int, List[int], str]] ="infer"
UpperCamelCase__ : Optional[List[str]] =None
UpperCamelCase__ : Optional[List[str]] =None
UpperCamelCase__ : Optional[Union[int, str, List[int], List[str]]] =None
UpperCamelCase__ : Optional[Union[List[int], List[str]]] =None
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[Literal["c", "python", "pyarrow"]] =None
UpperCamelCase__ : Dict[Union[int, str], Callable[[Any], Any]] =None
UpperCamelCase__ : Optional[list] =None
UpperCamelCase__ : Optional[list] =None
UpperCamelCase__ : bool =False
UpperCamelCase__ : Optional[Union[int, List[int]]] =None
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Optional[Union[str, List[str]]] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : str ="."
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : str ='"'
UpperCamelCase__ : int =0
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : int =0
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : Optional[str] =None
UpperCamelCase__ : int =10000
UpperCamelCase__ : Optional[datasets.Features] =None
UpperCamelCase__ : Optional[str] ="strict"
UpperCamelCase__ : Literal["error", "warn", "skip"] ="error"
UpperCamelCase__ : Optional[str] =None
def lowerCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
if self.delimiter is not None:
_lowerCamelCase : Union[str, Any] =self.delimiter
if self.column_names is not None:
_lowerCamelCase : Dict =self.column_names
@property
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove arguments introduced in pandas 2.0 when running an older pandas
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove arguments introduced in pandas 1.3 when running an older pandas
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
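# Illustrative sketch (not part of the original file): the property above only forwards a listed
# pandas kwarg when the user changed it from the config default, so deprecated parameters are not
# passed blindly. The standalone example below shows the same default-pruning idea.
from dataclasses import dataclass, fields


@dataclass
class DemoReadConfig:
    sep: str = ","
    header: str = "infer"


def pruned_kwargs(config: DemoReadConfig) -> dict:
    defaults = DemoReadConfig()
    return {
        f.name: getattr(config, f.name)
        for f in fields(DemoReadConfig)
        if getattr(config, f.name) != getattr(defaults, f.name)
    }


assert pruned_kwargs(DemoReadConfig(sep=";")) == {"sep": ";"}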
class A ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ : List[str] =CsvConfig
def lowerCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase ( self : Tuple , lowercase_ : Any ) -> List[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_lowerCamelCase : int =dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase_ , (str, list, tuple) ):
_lowerCamelCase : List[Any] =data_files
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase : Union[str, Any] =[files]
_lowerCamelCase : Union[str, Any] =[dl_manager.iter_files(lowercase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
_lowerCamelCase : Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(lowercase_ , lowercase_ ):
_lowerCamelCase : Dict =[files]
_lowerCamelCase : Optional[int] =[dl_manager.iter_files(lowercase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase_ , gen_kwargs={'files': files} ) )
return splits
def lowerCamelCase ( self : int , lowercase_ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_lowerCamelCase : List[str] =self.config.features.arrow_schema
if all(not require_storage_cast(lowercase_ ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCamelCase : Optional[int] =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowercase_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCamelCase : List[Any] =table_cast(lowercase_ , lowercase_ )
return pa_table
def lowerCamelCase ( self : Optional[Any] , lowercase_ : List[Any] ) -> Any:
"""simple docstring"""
_lowerCamelCase : str =self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCamelCase : Union[str, Any] =(
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowercase_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase_ ) ):
_lowerCamelCase : Union[str, Any] =pd.read_csv(lowercase_ , iterator=lowercase_ , dtype=lowercase_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowercase_ ):
_lowerCamelCase : Tuple =pa.Table.from_pandas(lowercase_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowercase_ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(lowercase_ )}: {e}''' )
raise
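# Illustrative sketch (not part of the original file): the generator above streams each CSV in
# chunks via pandas' iterator mode and converts every chunk to an Arrow table. A minimal
# standalone version of that loop:
import io

import pandas as pd
import pyarrow as pa

csv_buffer = io.StringIO("a,b\n1,2\n3,4\n")
for batch_idx, df in enumerate(pd.read_csv(csv_buffer, iterator=True, chunksize=1)):
    pa_table = pa.Table.from_pandas(df)
    assert pa_table.num_rows == 1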
| 199
| 0
|
from maths.prime_check import is_prime
def A ( __UpperCAmelCase ) -> int:
    """
    Return the twin prime of the input (i.e. input + 2) when both are prime, else -1.

    >>> A(3)
    5
    >>> A(4)
    -1
    """
    if not isinstance(__UpperCAmelCase , int ):
        UpperCAmelCase_ = f"Input value of [number={__UpperCAmelCase}] must be an integer"
        raise TypeError(UpperCAmelCase_ )
    if is_prime(__UpperCAmelCase ) and is_prime(__UpperCAmelCase + 2 ):
        return __UpperCAmelCase + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Union[str, Any] ="maskformer-swin"
UpperCamelCase__ : List[str] ={
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :Union[str, Any] , _lowercase :Optional[int]=224 , _lowercase :List[str]=4 , _lowercase :Tuple=3 , _lowercase :List[Any]=96 , _lowercase :Any=[2, 2, 6, 2] , _lowercase :int=[3, 6, 12, 24] , _lowercase :List[Any]=7 , _lowercase :Dict=4.0 , _lowercase :Any=True , _lowercase :int=0.0 , _lowercase :List[Any]=0.0 , _lowercase :Tuple=0.1 , _lowercase :str="gelu" , _lowercase :Union[str, Any]=False , _lowercase :Tuple=0.02 , _lowercase :List[str]=1E-5 , _lowercase :List[str]=None , _lowercase :Any=None , **_lowercase :Any , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_lowercase)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_lowercase) - 1))
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_lowercase) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
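# Illustrative check (not part of the original file): with the default embed_dim=96 and
# depths=[2, 2, 6, 2], the channel dimension after the last stage (the hidden_size
# recorded above) is 96 * 2 ** (4 - 1) = 768.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768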
| 344
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger("""transformers.models.speecht5""")
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
hf_model.apply_weight_norm()
snake_case : Optional[Any] = checkpoint["input_conv.weight_g"]
snake_case : Union[str, Any] = checkpoint["input_conv.weight_v"]
snake_case : List[Any] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
snake_case : List[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""]
snake_case : Dict = checkpoint[f"""upsamples.{i}.1.weight_v"""]
snake_case : Dict = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
snake_case : Union[str, Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
snake_case : List[str] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
snake_case : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
snake_case : Optional[int] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
snake_case : List[Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
snake_case : List[str] = checkpoint["output_conv.1.weight_g"]
snake_case : Optional[int] = checkpoint["output_conv.1.weight_v"]
snake_case : int = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
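# Illustrative sketch (not part of the original file): weight norm stores each conv weight as a
# (weight_g, weight_v) pair with weight = g * v / ||v||, which is why the checkpoint keys above
# end in .weight_g / .weight_v. Minimal demo; the layer sizes are assumptions:
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(2, 2, kernel_size=3))
assert hasattr(conv, "weight_g") and hasattr(conv, "weight_v")
remove_weight_norm(conv)  # folds g and v back into a plain .weight for saving
assert not hasattr(conv, "weight_g")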
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , ):
if config_path is not None:
snake_case : Any = SpeechTaHifiGanConfig.from_pretrained(__lowerCamelCase )
else:
snake_case : List[Any] = SpeechTaHifiGanConfig()
snake_case : Tuple = SpeechTaHifiGan(__lowerCamelCase )
snake_case : Any = torch.load(__lowerCamelCase )
load_weights(orig_checkpoint["model"]["generator"] , __lowerCamelCase , __lowerCamelCase )
snake_case : int = np.load(__lowerCamelCase )
snake_case : List[str] = stats[0].reshape(-1 )
snake_case : Dict = stats[1].reshape(-1 )
snake_case : Optional[Any] = torch.from_numpy(__lowerCamelCase ).float()
snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase ).float()
model.save_pretrained(__lowerCamelCase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 59
|
__lowerCamelCase = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def UpperCamelCase ( from_type : str , to_type : str , value : float ):
    """
    Convert ``value`` between the units in ENERGY_CONVERSION, pivoting through joules.

    >>> UpperCamelCase("kilojoule", "joule", 1)
    1000.0
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        snake_case : List[Any] = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(snake_case )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
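# Illustrative check (not part of the original file): conversion pivots through joules, i.e.
# value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]. For example, one
# kilowatthour expressed in joules:
assert UpperCamelCase("kilowatthour", "joule", 1) == 3_600_000.0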
| 59
| 1
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
_lowercase = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
_lowercase = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = """ Hello world! cécé herlolip"""
_lowercase = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[int] , __lowerCamelCase :List[Any] , __lowerCamelCase :Tuple ):
_lowerCAmelCase = dct.pop(__lowerCamelCase )
_lowerCAmelCase = val
def A (__lowerCamelCase :str ):
_lowerCAmelCase = torch.load(__lowerCamelCase , map_location="""cpu""" )
_lowerCAmelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def A (__lowerCamelCase :Any ):
_lowerCAmelCase , _lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_lowerCAmelCase = emb.weight.data
return lin_layer
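# Illustrative sketch (not part of the original file): the helper above turns the shared
# embedding matrix into an output projection with tied weights, the standard way an LM head
# reuses the input embeddings. The sizes below are demo assumptions.
import torch
from torch import nn

emb = nn.Embedding(10, 4)  # vocab_size=10, emb_size=4
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # tie: both share one (10, 4) matrix
logits = lm_head(emb(torch.tensor([3])))  # scores over the 10-token vocabulary
assert logits.shape == (1, 10)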
@torch.no_grad()
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Dict=None ):
if not os.path.exists(__lowerCamelCase ):
_lowerCAmelCase = torch.hub.load("""pytorch/fairseq""" , __lowerCamelCase ).eval()
else:
_lowerCAmelCase = load_xsum_checkpoint(__lowerCamelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_lowerCAmelCase = checkpoint_path.replace(""".""" , """-""" )
_lowerCAmelCase = BartConfig.from_pretrained(__lowerCamelCase )
_lowerCAmelCase = bart.encode(__lowerCamelCase ).unsqueeze(0 )
_lowerCAmelCase = BartTokenizer.from_pretrained(__lowerCamelCase ).encode(__lowerCamelCase , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(__lowerCamelCase , __lowerCamelCase ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
_lowerCAmelCase = bart.state_dict()
remove_ignore_keys_(__lowerCamelCase )
_lowerCAmelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = BartForSequenceClassification(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
_lowerCAmelCase = bart.predict("""mnli""" , __lowerCamelCase , return_logits=__lowerCamelCase )
_lowerCAmelCase = model(__lowerCamelCase )[0] # logits
else: # no classification heads to worry about
_lowerCAmelCase = bart.model.state_dict()
remove_ignore_keys_(__lowerCamelCase )
_lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
_lowerCAmelCase = bart.extract_features(__lowerCamelCase )
if hf_checkpoint_name == "facebook/bart-large":
_lowerCAmelCase = BartModel(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
_lowerCAmelCase = model(__lowerCamelCase ).model[0]
else:
_lowerCAmelCase = BartForConditionalGeneration(__lowerCamelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(__lowerCamelCase )
if hasattr(__lowerCamelCase , """lm_head""" ):
_lowerCAmelCase = make_linear_from_emb(model.model.shared )
_lowerCAmelCase = model.model(__lowerCamelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
_lowercase = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 367
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=3 , _lowercase=4 , _lowercase=[10, 20, 30, 40] , _lowercase=[2, 2, 3, 2] , _lowercase=True , _lowercase=True , _lowercase=37 , _lowercase="gelu" , _lowercase=10 , _lowercase=0.02 , _lowercase=["stage2", "stage3", "stage4"] , _lowercase=3 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_stages
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = out_features
_lowerCAmelCase = num_labels
_lowerCAmelCase = scope
_lowerCAmelCase = num_stages
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowercase ( self ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowercase , loss_ignore_index=255 , num_labels=self.num_labels , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = UperNetForSemanticSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
        (_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase) = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_lowercase : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_lowercase : Dict = False
_lowercase : Optional[Any] = False
_lowercase : List[str] = False
_lowercase : Union[str, Any] = False
_lowercase : List[str] = False
_lowercase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = UperNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
return
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
_lowerCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = _config_zero_init(_lowercase )
_lowerCAmelCase = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(config=_lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def A ():
_lowerCAmelCase = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_lowerCAmelCase = Image.open(__lowerCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(_lowercase )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
with torch.no_grad():
_lowerCAmelCase = model(**_lowercase )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(_lowercase )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
with torch.no_grad():
_lowerCAmelCase = model(**_lowercase )
_lowerCAmelCase = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowerCAmelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowercase , atol=1e-4 ) )
| 229
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''git_vision_model'''
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''git'''
    def __init__( self , vision_config=None , vocab_size=30522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
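# Hedged usage sketch (added): omitting `vision_config` makes __init__ above
# log a note and fall back to GitVisionConfig defaults.
# config = GitConfig()
# assert config.vision_config.model_type == "git_vision_model"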
| 323
| 1
|
A_ :Union[str, Any] = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 245
|
def A ( number: int ) -> bool:
    if number < 0:
        raise ValueError('number must not be negative' )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
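# Illustrative sanity check (added): a power of two has exactly one set bit,
# so `n & (n - 1)` clears it to zero. Note the bit trick also reports True for
# 0, which callers may want to exclude separately.
# assert [n for n in range(1, 20) if A(n)] == [1, 2, 4, 8, 16]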
| 245
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : int = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __A ( PretrainedConfig ):
    model_type = "gpt_neox"
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
    def _rope_scaling_validation( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
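# Illustrative (added) example of a value that passes the validation above: a
# dict with a `type` in {"linear", "dynamic"} and a float `factor` > 1.0, e.g.
# config = __A(rope_scaling={"type": "linear", "factor": 2.0})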
| 138
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCamelCase ( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
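# Hedged usage sketch (added): the helper above checks a single pinned
# dependency on demand, optionally with an install hint shown on failure:
# UpperCamelCase("tqdm")
# UpperCamelCase("tokenizers", "pip install -U tokenizers")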
| 252
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args , unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 252
| 1
|
from __future__ import annotations
def median_of_two_arrays( numsa: list[float] , numsb: list[float] ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
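# Worked example (added): merging [1, 3] and [2] gives [1, 2, 3]; the length
# is odd, so the median is the middle element. With [1, 3] and [2, 4] the
# merged length is even and the two middle elements are averaged:
# assert median_of_two_arrays([1, 3], [2]) == 2
# assert median_of_two_arrays([1, 3], [2, 4]) == 2.5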
| 9
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int:
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a = [[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
_a = {}
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict:
# check if the u exists
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a = [[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_a = [[w, u]]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple:
_a = time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
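# Illustrative standalone sketch (added): the BFS methods above follow the
# classic deque pattern over the {node: [[weight, node], ...]} adjacency shape
# the classes build; a minimal self-contained version, for reference:
def bfs_sketch(graph, start):
    visited, queue = [start], deque([start])
    while queue:
        s = queue.popleft()
        for _weight, neighbour in graph.get(s, []):
            if neighbour not in visited:
                queue.append(neighbour)
                visited.append(neighbour)
    return visited
# bfs_sketch({1: [[1, 2]], 2: [[1, 3]], 3: []}, 1) -> [1, 2, 3]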
| 320
| 0
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__magic_name__: str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__magic_name__: Dict = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__magic_name__: Optional[int] = "allenai"
def rewrite_dict_keys( d ):
    """simple docstring"""
    da = dict((re.sub(R"""@@$""", """""", k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""", """</w>""", k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
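# Worked example (added) for the key rewrite above: fairseq stores BPE
# continuation pieces with a trailing "@@"; the rewrite strips it and marks
# word-final pieces with "</w>" instead, then restores the special tokens:
#   {"le@@": 5, "er": 7, "<unk>": 3} -> {"le": 5, "er</w>": 7, "<unk>": 3}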
def convert_fsmt_checkpoint_to_pytorch( _A, pytorch_dump_folder_path ):
    """simple docstring"""
    assert os.path.exists(_A )
    os.makedirs(pytorch_dump_folder_path, exist_ok=True )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__magic_name__ : List[str] = basename(_A )
__magic_name__ : Tuple = dirname(_A )
__magic_name__ : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__magic_name__ : Tuple = cls.hub_models()
__magic_name__ : Optional[Any] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
__magic_name__ : Union[str, Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
__magic_name__ : Dict = hub_utils.from_pretrained(
_A, _A, _A, archive_map=_A, **_A )
__magic_name__ : int = vars(chkpt["""args"""]["""model"""] )
__magic_name__ : Tuple = args["""source_lang"""]
__magic_name__ : int = args["""target_lang"""]
__magic_name__ : int = dirname(_A )
__magic_name__ : List[Any] = basename(_A )
# dicts
__magic_name__ : List[Any] = os.path.join(_A, f'dict.{src_lang}.txt' )
__magic_name__ : List[str] = os.path.join(_A, f'dict.{tgt_lang}.txt' )
__magic_name__ : Tuple = Dictionary.load(_A )
__magic_name__ : List[str] = rewrite_dict_keys(src_dict.indices )
__magic_name__ : str = len(_A )
__magic_name__ : Tuple = os.path.join(_A, """vocab-src.json""" )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(_A, ensure_ascii=_A, indent=_A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__magic_name__ : List[Any] = True
for k in src_vocab.keys():
if not k.islower():
__magic_name__ : Optional[int] = False
break
__magic_name__ : List[str] = Dictionary.load(_A )
__magic_name__ : List[str] = rewrite_dict_keys(tgt_dict.indices )
__magic_name__ : int = len(_A )
__magic_name__ : Dict = os.path.join(_A, """vocab-tgt.json""" )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(_A, ensure_ascii=_A, indent=_A ) )
# merges_file (bpecodes)
__magic_name__ : Tuple = os.path.join(_A, VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
__magic_name__ : Tuple = os.path.join(_A, _A )
if os.path.exists(_A ):
break
with open(_A, encoding="""utf-8""" ) as fin:
__magic_name__ : List[str] = fin.read()
__magic_name__ : Any = re.sub(R""" \d+$""", """""", _A, 0, re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(_A, """w""", encoding="""utf-8""" ) as fout:
fout.write(_A )
# model config
__magic_name__ : Optional[Any] = os.path.join(_A, """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
__magic_name__ : List[Any] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
__magic_name__ : Union[str, Any] = 5
__magic_name__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
__magic_name__ : Tuple = best_score_hparams[model_dir]["""length_penalty"""]
else:
__magic_name__ : List[str] = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(_A, ensure_ascii=_A, indent=_A ) )
# tokenizer config
__magic_name__ : Tuple = os.path.join(_A, _A )
__magic_name__ : Optional[int] = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(_A, """w""", encoding="""utf-8""" ) as f:
f.write(json.dumps(_A, ensure_ascii=_A, indent=_A ) )
# model
__magic_name__ : Dict = chkpt["""models"""][0]
__magic_name__ : Any = model.state_dict()
# rename keys to start with 'model.'
__magic_name__ : Optional[int] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
__magic_name__ : Optional[Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(_A, _A )
__magic_name__ : Optional[int] = FSMTConfig.from_pretrained(_A )
__magic_name__ : List[Any] = FSMTForConditionalGeneration(_A )
# check that it loads ok
model_new.load_state_dict(_A, strict=_A )
# save
__magic_name__ : Union[str, Any] = os.path.join(_A, _A )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(_A, _A )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 359
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: Dict = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Any = '''blenderbot-small'''
lowercase__ : Optional[int] = ['''past_key_values''']
lowercase__ : Dict = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ) -> Tuple:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class snake_case__ ( _lowerCAmelCase ):
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ : List[Any] = {0: """batch"""}
__magic_name__ : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__magic_name__ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
__magic_name__ : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__magic_name__ ,__magic_name__ : Dict = self.num_layers
for i in range(lowerCAmelCase__ ):
__magic_name__ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__magic_name__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Any = super().outputs
else:
__magic_name__ : int = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
__magic_name__ ,__magic_name__ : str = self.num_layers
for i in range(lowerCAmelCase__ ):
__magic_name__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
__magic_name__ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
__magic_name__ : Optional[int] = seq_length if not self.use_past else 1
__magic_name__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : Dict = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : List[Any] = common_inputs["""input_ids"""].shape
__magic_name__ : Optional[Any] = common_inputs["""decoder_input_ids"""].shape[1]
__magic_name__ ,__magic_name__ : str = self.num_attention_heads
__magic_name__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Any = decoder_seq_length + 3
__magic_name__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ ,__magic_name__ : List[str] = self.num_layers
__magic_name__ : Optional[Any] = min(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : List[str] = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
__magic_name__ : Tuple = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
__magic_name__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
__magic_name__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__magic_name__ ,__magic_name__ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__magic_name__ : List[Any] = seqlen + 2
__magic_name__ ,__magic_name__ : Any = self.num_layers
__magic_name__ ,__magic_name__ : int = self.num_attention_heads
__magic_name__ : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : Optional[int] = common_inputs["""attention_mask"""].dtype
__magic_name__ : Optional[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
__magic_name__ : Tuple = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__magic_name__ : Tuple = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__magic_name__ : str = tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
__magic_name__ : List[str] = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
__magic_name__ : List[Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__magic_name__ : List[str] = dict(tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
elif self.task == "causal-lm":
__magic_name__ : str = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
else:
__magic_name__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
return common_inputs
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[Any] = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__magic_name__ : Tuple = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
| 138
| 0
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __lowercase ( _lowercase , _lowercase ):
@register_to_config
def __init__(self , A = 1_2_8 , A = 2_5_6 , A = 20_00.0 , A = 7_6_8 , A = 1_2 , A = 1_2 , A = 6_4 , A = 2_0_4_8 , A = 0.1 , ):
super().__init__()
lowerCamelCase_ : Optional[int] = nn.Sequential(
nn.Linear(A , d_model * 4 , bias=A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=A ) , nn.SiLU() , )
lowerCamelCase_ : Optional[Any] = nn.Embedding(A , A )
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : List[Any] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : List[Any] = nn.Dropout(p=A )
lowerCamelCase_ : List[str] = nn.ModuleList()
for lyr_num in range(A ):
# FiLM conditional T5 decoder
lowerCamelCase_ : List[Any] = DecoderLayer(d_model=A , d_kv=A , num_heads=A , d_ff=A , dropout_rate=A )
self.decoders.append(A )
lowerCamelCase_ : Any = TaLayerNorm(A )
lowerCamelCase_ : Optional[Any] = nn.Dropout(p=A )
lowerCamelCase_ : Optional[int] = nn.Linear(A , A , bias=A )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Tuple = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCamelCase_ : List[str] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCamelCase_ : Any = self.conditioning_emb(A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCamelCase_ : List[str] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCamelCase_ : Tuple = torch.broadcast_to(
torch.arange(A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCamelCase_ : Union[str, Any] = self.position_encoding(A )
lowerCamelCase_ : List[str] = self.continuous_inputs_projection(A )
inputs += position_encodings
lowerCamelCase_ : Dict = self.dropout(A )
# decoder: No padding present.
lowerCamelCase_ : str = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCamelCase_ : Any = [(x, self.encoder_decoder_mask(A , A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCamelCase_ : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCamelCase_ : Any = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCamelCase_ : Union[str, Any] = lyr(
A , conditioning_emb=A , encoder_hidden_states=A , encoder_attention_mask=A , )[0]
lowerCamelCase_ : Dict = self.decoder_norm(A )
lowerCamelCase_ : Any = self.post_dropout(A )
lowerCamelCase_ : Optional[int] = self.spec_out(A )
return spec_out
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A , A , A=1E-6 ):
super().__init__()
lowerCamelCase_ : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=A , d_kv=A , num_heads=A , dropout_rate=A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=A , d_kv=A , num_heads=A , dropout_rate=A , layer_norm_epsilon=A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=A , d_ff=A , dropout_rate=A , layer_norm_epsilon=A ) )
def UpperCAmelCase__ (self , A , A=None , A=None , A=None , A=None , A=None , ):
lowerCamelCase_ : Dict = self.layer[0](
A , conditioning_emb=A , attention_mask=A , )
if encoder_hidden_states is not None:
lowerCamelCase_ : Dict = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowerCamelCase_ : Dict = self.layer[1](
A , key_value_states=A , attention_mask=A , )
# Apply Film Conditional Feed Forward layer
lowerCamelCase_ : Optional[int] = self.layer[-1](A , A )
return (hidden_states,)
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Optional[int] = TaLayerNorm(A )
lowerCamelCase_ : List[str] = TaFiLMLayer(in_features=d_model * 4 , out_features=A )
lowerCamelCase_ : str = Attention(query_dim=A , heads=A , dim_head=A , out_bias=A , scale_qk=A )
lowerCamelCase_ : Union[str, Any] = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None , A=None , ):
# pre_self_attention_layer_norm
lowerCamelCase_ : int = self.layer_norm(A )
if conditioning_emb is not None:
lowerCamelCase_ : Union[str, Any] = self.FiLMLayer(A , A )
# Self-attention block
lowerCamelCase_ : Optional[int] = self.attention(A )
lowerCamelCase_ : Tuple = hidden_states + self.dropout(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = Attention(query_dim=A , heads=A , dim_head=A , out_bias=A , scale_qk=A )
lowerCamelCase_ : Any = TaLayerNorm(A , eps=A )
lowerCamelCase_ : Dict = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None , A=None , ):
lowerCamelCase_ : Tuple = self.layer_norm(A )
lowerCamelCase_ : Optional[Any] = self.attention(
A , encoder_hidden_states=A , attention_mask=attention_mask.squeeze(1 ) , )
lowerCamelCase_ : int = hidden_states + self.dropout(A )
return layer_output
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = TaDenseGatedActDense(d_model=A , d_ff=A , dropout_rate=A )
lowerCamelCase_ : int = TaFiLMLayer(in_features=d_model * 4 , out_features=A )
lowerCamelCase_ : Tuple = TaLayerNorm(A , eps=A )
lowerCamelCase_ : int = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : List[Any] = self.layer_norm(A )
if conditioning_emb is not None:
lowerCamelCase_ : str = self.film(A , A )
lowerCamelCase_ : Tuple = self.DenseReluDense(A )
lowerCamelCase_ : Tuple = hidden_states + self.dropout(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A , A ):
super().__init__()
lowerCamelCase_ : Dict = nn.Linear(A , A , bias=A )
lowerCamelCase_ : Optional[int] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : List[Any] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : Tuple = nn.Dropout(A )
lowerCamelCase_ : int = NewGELUActivation()
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = self.act(self.wi_a(A ) )
lowerCamelCase_ : Union[str, Any] = self.wi_a(A )
lowerCamelCase_ : Dict = hidden_gelu * hidden_linear
lowerCamelCase_ : int = self.dropout(A )
lowerCamelCase_ : Optional[int] = self.wo(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A=1E-6 ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = nn.Parameter(torch.ones(A ) )
lowerCamelCase_ : Union[str, Any] = eps
def UpperCAmelCase__ (self , A ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowerCamelCase_ : List[str] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=A )
lowerCamelCase_ : List[str] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCamelCase_ : List[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __lowercase ( nn.Module ):
def UpperCAmelCase__ (self , A ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(A , 3.0 )) ))
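# Note (added): the activation above is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))), matching the
# implementation popularized by the original BERT codebase.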
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = nn.Linear(A , out_features * 2 , bias=A )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Any = self.scale_bias(A )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = torch.chunk(A , 2 , -1 )
lowerCamelCase_ : Optional[Any] = x * (1 + scale) + shift
return x
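# Shape note (added) for the FiLM layer above: `scale_bias` projects the
# conditioning embedding to 2 * out_features, which is chunked into (scale,
# shift) halves applied feature-wise as x * (1 + scale) + shift; a
# (B, 1, 2 * out_features) projection broadcasts over the sequence dimension.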
| 318
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet(self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae(self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor(self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__(self ):
                    self.pixel_values = torch.ones([0] )
                def to(self , device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case(self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16(self ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' , image=init_image , ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so use a loose max-absolute-error tolerance here
        assert np.abs(expected_image - image ).max() < 1E-2
| 318
| 1
|
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ) -> np.ndarray:
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
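# Illustrative sketch (not part of the original file): subtracting the row-wise max before
# exponentiating keeps `softmax` finite even for logits where a naive np.exp would overflow.
def _softmax_stability_example():
    logits = np.array([[1000.0, 1001.0]] )  # np.exp(1000.0) alone overflows to inf
    return softmax(logits )  # ~[[0.269, 0.731]], computed without overflow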
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n ' , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , UserWarning , )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ):
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 352
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
_snake_case = logging.getLogger(__name__)
_snake_case = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig( PretrainedConfig ):
    model_type = 'bertabs'
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 199
| 0
|
import math
def decimal_to_octal(num: int ) -> str:
    '''simple docstring'''
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'''0o{int(octal )}'''
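# Illustrative cross-check (not part of the original file): Python's built-in oct() produces
# the same "0o..." strings, so it can sanity-check decimal_to_octal for positive integers.
def _octal_self_check() -> None:
    for n in (2, 8, 65, 216, 512):
        assert decimal_to_octal(n) == oct(n)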
def main() -> None:
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(6_5 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_1_6 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
| 329
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
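# Usage sketch (illustrative, not part of the original tests): the reload helper above
# round-trips the scheduler through state_dict()/load_state_dict() halfway through, so if
# its learning rates match unwrap_schedule's, the schedule survives checkpointing intact.
def _schedule_roundtrip_example():
    model = nn.Linear(4 , 4 )
    optimizer = AdamW(model.parameters() , lr=10.0 )
    scheduler = get_constant_schedule_with_warmup(optimizer , num_warmup_steps=4 )
    return unwrap_and_save_reload_schedule(scheduler , num_steps=10 )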
@require_torch
class __a ( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __a ( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
"""simple docstring"""
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    def __init__( self , fn ):
        """simple docstring"""
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return self.fn(*args , **kwargs )
@classmethod
    def wrap_scheduler( cls , scheduler ):
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 329
| 1
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    model_type = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
| 351
|
"""simple docstring"""
import os
import sys
_UpperCamelCase : str = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_UpperCamelCase : int = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def snake_case (*A_ :Optional[int] , **A_ :int ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def snake_case (*A_ :Optional[int] , **A_ :List[Any] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoModel.__doc__ )
def snake_case (*A_ :Optional[Any] , **A_ :Tuple ):
'''simple docstring'''
return AutoModel.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def snake_case (*A_ :str , **A_ :Optional[int] ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def snake_case (*A_ :Optional[Any] , **A_ :Dict ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def snake_case (*A_ :Dict , **A_ :str ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A_ , **A_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def snake_case (*A_ :Dict , **A_ :List[Any] ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A_ , **A_ )
| 186
| 0
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path ) -> argparse.Namespace:
    '''simple docstring'''
    print('Loading config file...' )
    # e.g. {"model": {"classification": {"name": "mobilevit_v2"}}} -> {"model.classification.name": "mobilevit_v2"}
    def flatten_yaml_as_dict(d , parent_key='' , sep='.' ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path , str(exc ) ) )
    return config
def get_mobilevitva_config(task_name , orig_cfg_file ) -> MobileViTVaConfig:
    '''simple docstring'''
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct , old , new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    '''simple docstring'''
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , F'{model_prefix}conv_stem.' )
        for i in [1, 2]:
            if F'layer_{i}.' in k:
                k_new = k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if F'layer_{i}.0.' in k:
                k_new = k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
            if F'layer_{i}.1.local_rep.0.' in k:
                k_new = k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
            if F'layer_{i}.1.local_rep.1.' in k:
                k_new = k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F'layer_{i}.1.global_rep.{j}.' in k:
                    k_new = k_new.replace(
                        F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
            if F'layer_{i}.1.global_rep.{j+1}.' in k:
                k_new = k_new.replace(
                    F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
            if F'layer_{i}.1.conv_proj.' in k:
                k_new = k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
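# Illustrative example (not part of the original script) of what the mapping above produces
# for a hypothetical classification-checkpoint key (values are irrelevant, only keys are read):
def _rename_keys_example():
    state_dict = {"layer_1.0.block.conv.weight": None}
    return create_rename_keys(state_dict )
    # -> [("layer_1.0.block.conv.weight", "mobilevitv2.encoder.layer.0.layer.0.convolution.weight")]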
def remove_unused_keys(state_dict ) -> None:
    '''remove unused keys (e.g.: seg_head.aux_head)'''
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.id2label[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 295
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
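# Illustrative note (not part of the original file): `allow_empty_subarrays` only changes the
# answer for all-negative inputs, where the empty subarray (sum 0) beats every non-empty one.
def _kadane_edge_case_example() -> None:
    assert max_subarray_sum([-3, -1, -2] ) == -1
    assert max_subarray_sum([-3, -1, -2] , allow_empty_subarrays=True ) == 0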
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 295
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_0000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__( self ):
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        """simple docstring"""
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f'''Failed to read file \'{file}\' with error {type(e )}: {e}''' )
                raise
| 199
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 199
| 1
|
'''simple docstring'''
from maths.prime_check import is_prime
def UpperCAmelCase ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
        _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
        tokens = tokenizer(**self.metas )['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """Compare tokenization of the metadata against pinned ids for the 5b-lyrics checkpoint."""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
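    @require_torch
    def test_tokenizer_returns_one_sequence_per_prior(self) -> None:
        """Added sanity check (not in the original test): Jukebox conditions three
        prior levels, so the tokenizer emits exactly three token sequences."""
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        self.assertEqual(len(tokens), 3)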
| 365
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.')
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training dataset. Explore datasets at: hf.co/datasets.')
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.')
    parser.add_argument(
        '--shard_size', type=int, default=1000, help='Number of entries to go in a single shard.')
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).')
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.')
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.')
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['text'])
    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
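def _roundtrip_example():
    # Illustrative helper (an addition; the script never calls it): serialize one
    # fake sample and parse it back with the matching fixed-length feature spec.
    fake = {'input_ids': [[1, 2, 3, 4]], 'attention_mask': [[1, 1, 1, 1]]}
    record = get_serialized_examples(fake)[0]
    parsed = tf.io.parse_single_example(
        record,
        {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(4,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(4,)),
        },
    )
    assert list(parsed['input_ids'].numpy()) == [1, 2, 3, 4]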
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", 'w') as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
main(args)
| 247
| 0
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
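# For reference, a sketch of the textbook Fisher-Yates shuffle (an assumption
# about the intended algorithm; the function above applies len(data) random
# transpositions instead, which shuffles but is not the classic procedure):
def fisher_yates_shuffle_classic(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick from the not-yet-fixed prefix, inclusive
        data[i], data[j] = data[j], data[i]
    return data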
| 41
|
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True iff n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Search 4-digit then 3-digit multiplicands for the largest 1-9 pandigital."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num  # concat(n, 2n) for a 4-digit n
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num  # concat(n, 2n, 3n) for a 3-digit n
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248
| 0
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
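# Quick numeric sanity check (illustrative addition): both implementations agree
# on the classic 3-4-5 right triangle.
assert float(euclidean_distance((0, 0), (3, 4))) == 5.0
assert float(euclidean_distance_no_np((0, 0), (3, 4))) == 5.0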
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 360
|
def is_automorphic_number(number: int) -> bool:
    """A number is automorphic when its square ends in the number itself (e.g. 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
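# Worked examples (illustrative addition): 76**2 = 5776 ends in 76, so 76 is
# automorphic; 7**2 = 49 does not end in 7, so 7 is not.
assert is_automorphic_number(76) is True
assert is_automorphic_number(7) is False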
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99
| 0
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
Extractor,
GzipExtractor,
    Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bz2_file, Bzip2Extractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lz4_file, Lz4Extractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bz2_file,
        'gzip': gz_file,
        'lz4': lz4_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile
    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.join('..', text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile
    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink('..', directory / 'subdir', target_is_directory=True)
    with tarfile.TarFile(path, 'w') as f:
        f.add(str(directory / 'subdir'), arcname='subdir')  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    insecure_tar_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(insecure_tar_path, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
        b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
        b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
        b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    with not_a_zip_file.open('wb') as f:
        f.write(data)
assert zipfile.is_zipfile(str(__lowerCAmelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__lowerCAmelCase ) # but we're right
| 348
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested fold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')
    def to_dict(self):
        output = asdict(self)
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
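def _example_folding_config() -> "EsmConfig":
    # Minimal construction sketch (an addition, assuming the defaults above):
    # nested fold/trunk configs are filled in automatically for folding models.
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    assert isinstance(config.esmfold_config, EsmFoldConfig)
    assert isinstance(config.esmfold_config.trunk, TrunkConfig)
    return config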
| 348
| 1
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 204
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual(a * b, 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_matrix_change_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_matrix_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_matrix_add(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_matrix_sub(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5)))
if __name__ == "__main__":
unittest.main()
| 204
| 1
|
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative base-10 integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
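def _check_against_builtin() -> None:
    # Cross-check with Python's builtin formatter (illustrative addition):
    # oct() should agree with decimal_to_octal for positive integers.
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value)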
| 346
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 346
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Compare the total log-likelihood of the label sequence against a pinned value.
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 265
|
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    """Fetch today's quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + '/today').json()


def random_quotes() -> list:
    """Fetch a random quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + '/random').json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 265
| 1
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 152
|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __magic_name__ ( self : str , __lowercase : Any ) -> List[Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
SCREAMING_SNAKE_CASE__ : Tuple ='''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
def __magic_name__ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.scorer.score(references=__lowercase , candidates=__lowercase )
return {"scores": scores}
| 152
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!')
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.')
    parser.add_argument(
        '--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.')
    parser.add_argument(
        '--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.')
    parser.add_argument(
        '--tpu_name', type=str, help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.', default='local')
    parser.add_argument(
        '--tpu_zone', type=str, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.')
    parser.add_argument(
        '--train_dataset', type=str, help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.')
    parser.add_argument(
        '--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples)')
    parser.add_argument(
        '--eval_dataset', type=str, help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.')
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of epochs to train for.')
    parser.add_argument(
        '--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.')
    parser.add_argument(
        '--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.')
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py')
    parser.add_argument(
        '--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.')
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.')
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split('/')[-1]
        sample_count = re.search(r'-\d+-(\d+)\.tfrecord', filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate)
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])
    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask)
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size)
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False)
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks)
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__UpperCamelCase = parse_args()
main(args)
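# --- Added illustration (not part of the original script) ---
# A self-contained sketch of the TFRecord round trip that decode_fn performs above:
# an int64 example is serialized and then recovered with the same FixedLenFeature
# spec. The feature names mirror the script; max_length=8 is an arbitrary assumption.
import tensorflow as tf

def _demo_tfrecord_roundtrip(max_length=8):
    # serialize one example with int64 input_ids / attention_mask features
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=list(range(max_length)))),
                'attention_mask': tf.train.Feature(int64_list=tf.train.Int64List(value=[1] * max_length)),
            }
        )
    )
    serialized = example.SerializeToString()
    # parse it back with the same spec decode_fn uses
    features = {
        'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(max_length,)),
        'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(max_length,)),
    }
    return tf.io.parse_single_example(serialized, features)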
| 367
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
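# --- Added illustration (not part of the original module) ---
# A hedged usage sketch of the config above; it assumes the de-obfuscated names and
# shows the derived stage bookkeeping, not authoritative library behavior.
if __name__ == "__main__":
    config = ResNetConfig(out_features=["stage2", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']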
| 312
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
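# --- Added illustration (not part of the original builder) ---
# A stand-alone sketch of the conversion _generate_tables performs above: a pickled
# pandas DataFrame is read back and handed to pyarrow. The file name is hypothetical.
def _demo_pickle_to_arrow(path="frame.pkl"):
    pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle(path)
    # pa.Table.from_pandas infers the Arrow schema (a: int64, b: string)
    return pa.Table.from_pandas(pd.read_pickle(path))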
| 152
|
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
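    # Added worked check (the canonical Project Euler 22 example): "COLIN" scores
    # 3 + 15 + 12 + 9 + 14 = 53, so at list position 938 it contributes 938 * 53 = 49714.
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53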
| 176
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A : List[str] = logging.get_logger(__name__)
A : Union[str, Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "deberta-v2"
    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)])
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
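# --- Added illustration (not part of the original module) ---
# A hedged sketch of how the ONNX input spec above reacts to type_vocab_size:
# with type_vocab_size == 0 the export takes no token_type_ids. Constructor usage
# here is an assumption based on the de-obfuscated signatures, not documentation.
if __name__ == "__main__":
    onnx_config = DebertaV2OnnxConfig(DebertaV2Config(type_vocab_size=0))
    print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']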
| 370
|
"""simple docstring"""
def bfs(graph, source, sink, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
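# --- Added check (not part of the original script) ---
# The network above is the classic CLRS example; its maximum flow is 23. As a second,
# hypothetical sanity check, a unit-capacity chain 0 -> 1 -> 2 carries exactly one unit:
assert ford_fulkerson([[0, 1, 0], [0, 0, 1], [0, 0, 0]], 0, 2) == 1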
| 259
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 112
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
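# --- Added illustration (not from the transformers codebase) ---
# A toy re-implementation of the idea behind _LazyModule above: attributes listed
# in an import structure are resolved with importlib only on first access, so the
# top-level package import stays cheap. This is a simplified sketch; the real
# _LazyModule handles much more (dir(), reprs, missing optional dependencies).
import importlib

class _TinyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, name):
        # triggered only for attributes not yet present; import lazily here
        module = importlib.import_module(f"{self._package}.{self._attr_to_module[name]}")
        return getattr(module, name)

_lazy_json = _TinyLazyModule("json", {"decoder": ["JSONDecoder"]})
print(_lazy_json.JSONDecoder)  # json.decoder is imported only at this point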
| 112
| 1
|
def actual_power(a: int, b: int):
    '''simple docstring'''
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int):
    '''simple docstring'''
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
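    # Added worked examples: power(2, 10) squares its way to 1024 in O(log b)
    # multiplications, and a negative exponent takes the reciprocal branch.
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125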
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = "dinat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
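# --- Added note (not part of the original file) ---
# The hidden_size computed above doubles embed_dim once per stage after the first;
# with the defaults (embed_dim=64, four stages) that is 64 * 2 ** 3 == 512.
assert 64 * 2 ** (4 - 1) == 512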
| 59
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        '''simple docstring'''
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer(self):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        '''simple docstring'''
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)
    def translate_src_text(self, **tokenizer_kwargs):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
| 134
|
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_classification_head(self):
        '''simple docstring'''
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 134
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 350
|
"""simple docstring"""
def solution():
    '''simple docstring'''
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
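    # Added worked check: the qualifying triplet is (200, 375, 425); it sums to 1000,
    # satisfies 200**2 + 375**2 == 425**2, and its product is 31875000.
    assert 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2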
| 248
| 0
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowercase__ : str = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """simple docstring"""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio' , label_column='label' )
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 324
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 291
| 0
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ):
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
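    # --- Added checks (not part of the original script) ---
    # c = 0 never escapes, so get_distance returns the maximum normalized value 1.0;
    # c = 2 + 2i exceeds the divergence bound on the first iteration and returns ~0.
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(2, 2, 50) < 0.1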
| 343
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
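# --- Added worked example (not part of the original module) ---
# For [[4, 7], [2, 6]] the determinant is 4*6 - 7*2 = 10, so the inverse is
# [[0.6, -0.7], [-0.2, 0.4]].
if __name__ == "__main__":
    print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]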
| 343
| 1
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'], float)
            assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        """simple docstring"""
        self.run_seq2seq_quick()
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=False)
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple')
    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')
    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)
    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        """simple docstring"""
        self.run_seq2seq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        """simple docstring"""
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
    @parameterized.expand(['base', 'low', 'high', 'mixed'])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        """simple docstring"""
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data['extra_args_str'])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['n_matches'])
    @slow
    def test_run_seq2seq(self):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        """simple docstring"""
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json')).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , )
        self.assertEqual(
            loss_orig, loss_bnb, F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def run_trainer(self, max_len, model_name, num_train_epochs, learning_rate=3e-3, optim="adafactor", distributed=False, extra_args_str=None, eval_steps=0, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, n_gpus_to_use=None) -> str:
"""simple docstring"""
data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
output_dir = self.get_auto_remove_tmp_dir()
args_train = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps)}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
args_eval = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps)}\n '.split()
args_predict = """
--do_predict
""".split()
args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
n_gpus_to_use = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
cmd = [sys.executable] + distributed_args + args
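# Illustrative only: with n_gpus_to_use=1 the command assembled above resolves to roughly
# `python -m torch.distributed.run --nproc_per_node=1 --master_port=<port>
# <examples_dir>/pytorch/translation/run_translation.py --model_name_or_path ...`.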
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
else:
testargs = ["run_translation.py"] + args
with patch.object(sys, "argv", testargs):
main()
return output_dir
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
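# Illustrative note: with the _LazyModule indirection above, `from transformers import
# LongT5Model` only triggers the actual import of `modeling_longt5` on first attribute
# access, so importing the package stays cheap when the torch/flax extras are absent.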
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_gradient_accumulator(self):
accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])
with self.assertRaises(ValueError):
accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step , 3)
self.assertEqual(len(accumulator.gradients) , 1)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2)
accumulator.reset()
self.assertEqual(accumulator.step , 0)
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2)
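# Sanity arithmetic for the assertions above: the three accumulated gradients are
# [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] = [-2.0, 5.0], and reset() zeroes them again.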
def test_gradient_accumulator_distribution_strategy(self):
context._context = None
ops.enable_eager_execution_internal()
physical_devices = tf.config.list_physical_devices("CPU")
if len(physical_devices) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
devices = tf.config.list_logical_devices(device_type="CPU")
strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
with strategy.scope():
accumulator = GradientAccumulator()
variable = tf.Variable([4.0, 3.0])
optimizer, _ = create_optimizer(5e-5, 10, 5)
gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
def accumulate_on_replica(gradient):
accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable])))
@tf.function
def accumulate(grad1, grad2):
with strategy.scope():
local_variables = strategy.experimental_local_results(gradient_placeholder)
local_variables[0].assign(grad1)
local_variables[1].assign(grad2)
strategy.run(accumulate_on_replica, args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(apply_on_replica)
def _check_local_values(grad1, grad2):
values = strategy.experimental_local_results(accumulator._gradients[0])
self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0])
accumulate([3.0, -1.0] , [-1.0, -1.0])
accumulate([-2.0, 2.0] , [3.0, -2.0])
self.assertEqual(accumulator.step , 3)
_check_local_values([2.0, 3.0] , [1.0, -2.0])
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2)
accumulator.reset()
self.assertEqual(accumulator.step , 0)
_check_local_values([0.0, 0.0] , [0.0, 0.0])
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_a : Dict= logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead.", FutureWarning, )
super().__init__(*args, **kwargs)
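# Illustrative usage: the deprecated alias still works but warns once, e.g.
# CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32") behaves exactly
# like CLIPImageProcessor.from_pretrained(...) plus a FutureWarning.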
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(limit: int = 100_0000) -> int:
"""simple docstring"""
primes = set(range(3, limit, 2))
primes.add(2)
for p in range(3, limit, 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p, limit, p)))
phi = [float(n) for n in range(limit + 1)]
for p in primes:
for n in range(p, limit + 1, p):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:]))
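# Worked example (illustrative): for limit = 8 the sieve leaves primes {2, 3, 5, 7} and
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) would return 1+2+2+4+2+6+4 = 21.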
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
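# Example of one produced mapping (illustrative): embeddings(0) pairs the HF key
# "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight" with the
# original checkpoint key "stage0.patch_embed.proj.weight".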
def attention(idx, cnt):
attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
img_labels_file = "imagenet-1k-id2label.json"
num_labels = 1000
repo_id = "huggingface/label-files"
id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
config.depth = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
config.depth = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
config.depth = [2, 2, 20]
config.num_heads = [3, 12, 16]
config.embed_dim = [192, 768, 1024]
model = CvtForImageClassification(config)
image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
image_processor.size["shortest_edge"] = image_size
original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
huggingface_weights = OrderedDict()
list_of_state_dict = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
list_of_state_dict = list_of_state_dict + cls_token(idx)
list_of_state_dict = list_of_state_dict + embeddings(idx)
for cnt in range(config.depth[idx]):
list_of_state_dict = list_of_state_dict + attention(idx, cnt)
list_of_state_dict = list_of_state_dict + final()
for gg in list_of_state_dict:
print(gg)
for i in range(len(list_of_state_dict)):
huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(huggingface_weights)
model.save_pretrained(pytorch_dump_folder_path)
image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
def greatest_common_divisor(a: int, b: int) -> int:
"""simple docstring"""
return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
x, y = y, x % y
return abs(x)
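# Worked example (illustrative): gcd_by_iterative(24, 40) iterates
# (24, 40) -> (40, 24) -> (24, 16) -> (16, 8) -> (8, 0) and returns 8.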
def main() -> None:
"""simple docstring"""
try:
nums = input("Enter two integers separated by comma (,): ").split(",")
num_1 = int(nums[0])
num_2 = int(nums[1])
print(
f"greatest_common_divisor({num_1}, {num_2}) = "
f"{greatest_common_divisor(num_1, num_2)}")
print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
'''simple docstring'''
from math import pow
def backtrack(needed_sum: int, power: int, current_number: int, current_sum: int, solutions_count: int) -> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
i_to_n = int(pow(current_number, power))
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
current_sum, solutions_count = backtrack(
needed_sum, power, current_number + 1, current_sum, solutions_count)
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
current_sum, solutions_count = backtrack(
needed_sum, power, current_number + 1, current_sum, solutions_count)
return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count
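# Worked example (illustrative): solve(13, 2) == 1, since 13 = 2**2 + 3**2 is the only
# way to write 13 as a sum of distinct squares of natural numbers.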
if __name__ == "__main__":
import doctest
doctest.testmod()
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args: list) -> dict:
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
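# Example (illustrative): parse_unknown_args(["--num_proc", "8", "--seed", "42"])
# returns {"num_proc": "8", "seed": "42"} -- each flag is paired with the value after it.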
def main() -> None:
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1 )
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
from __future__ import annotations
snake_case_ = 'Muhammad Umer Farooq'
snake_case_ = 'MIT'
snake_case_ = '1.0.0'
snake_case_ = 'Muhammad Umer Farooq'
snake_case_ = 'contact@muhammadumerfarooq.me'
snake_case_ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
def __init__(self, domain: str):
"""simple docstring"""
super().__init__()
self.urls: list[str] = []
self.domain = domain
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
url = parse.urljoin(self.domain, value)
self.urls.append(url)
def get_domain_name(url: str) -> str:
return ".".join(get_sub_domain_name(url).split(".")[-2:])
def get_sub_domain_name(url: str) -> str:
return parse.urlparse(url).netloc
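# Examples (illustrative): get_sub_domain_name("https://api.github.com/events") returns
# "api.github.com", and get_domain_name keeps only the last two labels, i.e. "github.com".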
def lowerCamelCase__ ( snake_case_ : str = "https://github.com" ) -> list[str]:
__snake_case = get_domain_name(snake_case_ )
# Initialize the parser
parser = Parser(domain)
try:
# Open URL
r = requests.get(url)
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
valid_emails = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
read = requests.get(link)
# Get the valid email.
emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
# If not in list then append it.
for email in emails:
valid_emails.add(email)
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(valid_emails)
if __name__ == "__main__":
emails = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print('\n'.join(sorted(emails)))
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
'''simple docstring'''
def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def get_pipeline_config(self):
"""simple docstring"""
config = self.get_config()
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_decoder(self):
"""simple docstring"""
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
model = MraModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
"""simple docstring"""
config.add_cross_attention = True
model = MraModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
result = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
model = MraForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
model = MraForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
config.num_labels = self.num_labels
model = MraForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
config.num_labels = self.num_labels
model = MraForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
config.num_choices = self.num_choices
model = MraForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
'''simple docstring'''
all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
has_attentions = False
all_generative_model_classes = ()
def setUp(self):
"""simple docstring"""
self.model_tester = MraModelTester(self)
self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = MraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="MRA does not output attentions" )
def test_attention_outputs(self):
"""simple docstring"""
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def test_inference_no_head(self):
"""simple docstring"""
model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 256, 768))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_masked_lm(self):
"""simple docstring"""
model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)
with torch.no_grad():
output = model(input_ids)[0]
vocab_size = 50265
expected_shape = torch.Size((1, 256, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_masked_lm_long_input(self):
"""simple docstring"""
model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
input_ids = torch.arange(4096).unsqueeze(0)
with torch.no_grad():
output = model(input_ids)[0]
vocab_size = 50265
expected_shape = torch.Size((1, 4096, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
"""simple docstring"""
data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
dataset = Dataset.from_dict(data_dict)
return dataset
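# Note (illustrative): with the MinHash threshold of 0.85 used below, the first two rows
# ("a " * 20 and "a " * 30) produce near-identical shingle sets and form one duplicate
# cluster, while "b " * 7 stays unique.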
class MakeDuplicateClustersTest(TestCase):
"""simple docstring"""
def test_make_duplicate_clusters(self):
ds = get_dataset()
duplicate_clusters = make_duplicate_clusters(ds, 0.85)
self.assertEqual(len(duplicate_clusters[0]), 2)
def test_deduplicate_dataset(self):
ds = get_dataset()
ds_filter, duplicate_clusters = deduplicate_dataset(ds)
self.assertEqual(len(ds_filter), 2)
print(duplicate_clusters)
self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
default_checkpoint = "Salesforce/blip-image-captioning-base"
description = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
name = "image_captioner"
model_class = AutoModelForVision2Seq
inputs = ["image"]
outputs = ["text"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
super().__init__(*args, **kwargs)
def encode(self, image: "Image"):
return self.pre_processor(images=image, return_tensors="pt")
def forward(self, inputs):
return self.model.generate(**inputs)
def decode(self, outputs):
return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
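# Illustrative usage sketch (assumes `image` is a PIL image loaded elsewhere):
# tool = ImageCaptioningTool()
# caption = tool(image) # runs encode -> forward -> decode and returns an English caption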
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(ConnectionError):
http_head('https://huggingface.co' )
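# Summary of the three simulated failure modes exercised above (illustrative):
# CONNECTION_TIMES_OUT -> requests hang until their own timeout fires,
# CONNECTION_FAILS -> requests raise ConnectionError immediately,
# HF_DATASETS_OFFLINE_SET_TO_1 -> datasets' own helpers refuse to hit the network.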
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ('foo.json',)] )
def test_save_load_config(self, config_name):
config = GenerationConfig(
do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir, config_name=config_name)
loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample, True)
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k, 50)
self.assertEqual(loaded_config.max_length, 20)
self.assertEqual(loaded_config.max_time, None)
def test_from_model_config(self):
model_config = AutoConfig.from_pretrained("gpt2")
generation_config_from_model = GenerationConfig.from_model_config(model_config)
default_generation_config = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(generation_config_from_model, default_generation_config)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def test_update(self):
generation_config = GenerationConfig()
update_kwargs = {
"max_new_tokens": 1024,
"foo": "bar",
}
update_kwargs_copy = copy.deepcopy(update_kwargs)
unused_kwargs = generation_config.update(**update_kwargs)
# update_kwargs was not modified (no side effects)
self.assertEqual(update_kwargs, update_kwargs_copy)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens, 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(unused_kwargs, {"foo": "bar"})
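# Illustrative recap of the contract tested above: update(max_new_tokens=1024, foo="bar")
# applies the known attribute in place and hands {"foo": "bar"} back to the caller.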
def test_initialize_new_kwargs(self):
generation_config = GenerationConfig()
generation_config.foo = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(tmp_dir)
new_config = GenerationConfig.from_pretrained(tmp_dir)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo, "bar")
generation_config = GenerationConfig.from_model_config(new_config)
assert not hasattr(generation_config, "foo") # no new kwargs should be initialized if from config
def test_kwarg_init(self):
default_config = GenerationConfig()
self.assertEqual(default_config.temperature, 1.0)
self.assertEqual(default_config.do_sample, False)
self.assertEqual(default_config.num_beams, 1)
config = GenerationConfig(
do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
self.assertEqual(config.temperature, 0.7)
self.assertEqual(config.do_sample, True)
self.assertEqual(config.num_beams, 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir)
loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
self.assertEqual(loaded_config.temperature, 1.0)
self.assertEqual(loaded_config.do_sample, True)
self.assertEqual(loaded_config.num_beams, 1) # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
def setUpClass(cls):
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def test_push_to_hub(self):
config = GenerationConfig(
do_sample=True, temperature=0.7, length_penalty=1.0, )
config.push_to_hub("test-generation-config", use_auth_token=self._token)
new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k))
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k))
def test_push_to_hub_in_organization(self):
config = GenerationConfig(
do_sample=True, temperature=0.7, length_penalty=1.0, )
config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(v, getattr(new_config, k))
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
if not isinstance(number, int):
raise TypeError("Parameter number must be int")
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0")
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
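# Worked example (illustrative): digit_factorial_sum(145) = 1! + 4! + 5! = 1 + 24 + 120 = 145.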
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
if not isinstance(chain_length, int) or not isinstance(number_limit, int):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
chains_counter = 0
# the cached sizes of the previous chains
chain_sets_lengths: dict[int, int] = {}
for start_chain_element in range(1, number_limit):
# The temporary set will contain the elements of the chain
chain_set = set()
chain_set_length = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater than the desired one.
chain_element = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(chain_element)
chain_set_length += 1
chain_element = digit_factorial_sum(chain_element)
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
chain_sets_lengths[start_chain_element] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
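# Worked example (illustrative): starting from 69 the chain runs
# 69 -> 363600 -> 1454 -> 169 -> 363601 (-> 1454 again), i.e. five distinct terms before repeating.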
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
"""simple docstring"""
def is_palindrome(num: int) -> bool:
'''simple docstring'''
return str(num) == str(num)[::-1]
def sum_reverse(n: int) -> int:
'''simple docstring'''
return int(n) + int(str(n)[::-1])
def solution(limit: int = 1_0000) -> int:
'''simple docstring'''
lychrel_nums = []
for num in range(1, limit):
iterations = 0
n = num
while iterations < 50:
n = sum_reverse(n)
iterations += 1
if is_palindrome(n):
break
else:
lychrel_nums.append(num)
return len(lychrel_nums)
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    '''simple docstring'''
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)
        encoder_decoder_regex = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert (per-expert key pattern assumed from the MoE layout)
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_key] = expert_weights[idx]
                print(f"{key} -> {new_key}")
            s_dict.pop(key)
    return s_dict
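# Illustrative walk-through (hypothetical key, not taken from a real checkpoint):
#   "encoder/layers_0/attention/query/kernel"
# becomes "encoder/block/0/layer/attention/query/kernel" via the layers_ regex,
# and then "encoder/block/0/layer/0/SelfAttention/q/kernel" once the
# MOE_LAYER_NAME_MAPPING substitutions ("/attention/" -> "/0/SelfAttention/",
# "query" -> "q") are applied.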
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    '''simple docstring'''
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
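# Sketch of what the parsing above matches, assuming standard gin syntax:
#   "NUM_HEADS = 12"   -> args["num_heads"] = 12
#   "MLP_DIM = 3072"   -> args["d_ff"] = 3072
# and a line like "dense.MlpBlock.activations = ('gelu',)" is picked up by the
# second findall and stored as the feed-forward activation.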
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_name, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    '''simple docstring'''
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
    help=(
        '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model to convert. The model'''
        ''' configuration is read from `--gin_file` if provided, otherwise from `--config_name`.'''
    ),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
a__ : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
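    # Example invocation (file name and paths are placeholders, not verified):
    #   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/operative_config.gin \
    #       --pytorch_dump_folder_path ./switch-base-8 \
    #       --num_experts 8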
| 195
| 1
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
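# Why the loop above works (editor's note): the diagonals of a spiral with side
# length j hold 2 * j - 1 numbers, and growing the side from j to j + 2 adds the
# four corners j^2 + (j + 1), j^2 + 2(j + 1), j^2 + 3(j + 1) and (j + 2)^2; the
# last is a perfect square, so only the first three need a primality test.
# For the default ratio of 0.1, Project Euler 58 reports a side length of 26241.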
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183
|
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = """dummy_data"""
lowerCamelCase__ = """datasets"""
lowerCamelCase__ = False
    def __init__(self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None, ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
@property
    def dummy_data_folder(self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
    def dummy_zip_file(self):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)
@property
    def local_path_to_dummy_data(self):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, '/'))
        return self._bucket_url
@property
    def manual_dir(self):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}', url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
def A_ ( self ):
pass
def A_ ( self ):
pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(path).as_posix(), file_path.open('rb')
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(dirpath, filename)
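# Note on the naming convention used throughout this manager: dummy files are
# keyed by the last path component of their URL, percent-encoded so that query
# strings stay filesystem-safe, e.g. (illustrative URL only)
# "https://host/data/train.csv?v=1" -> "train.csv%3Fv%3D1".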
| 96
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = (DDPMParallelScheduler,)
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
        config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowerCAmelCase__ )
return config
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
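        # For the default "fixed_small" variance type, DDPM's posterior variance at
        # step t is beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t); the numbers
        # below are reference values for the linear beta schedule configured above.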
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ : Any = samplea.shape[0]
SCREAMING_SNAKE_CASE_ : int = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ : str = torch.arange(lowerCAmelCase__ )[0:3, None].repeat(1 , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler.batch_step_no_noise(lowerCAmelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : str = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase__ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ : Any = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE_ : int = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : int = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase__ ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ : Optional[int] = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE_ : int = pred_prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
def UpperCamelCase__ ( self ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 162
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 162
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=36 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = embedding_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_hidden_groups
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self ) -> str:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase = AlbertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
UpperCamelCase = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = AlbertForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , sentence_order_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = AlbertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = AlbertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = AlbertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = AlbertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.num_choices
UpperCamelCase = AlbertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
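        # Multiple-choice heads expect inputs shaped (batch, num_choices, seq_len),
        # so each (batch, seq_len) tensor below is tiled across a new choice axis.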
UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
def A__ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def A__ ( self ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def A__ ( self ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def A__ ( self ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def A__ ( self ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def A__ ( self ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def A__ ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def A__ ( self ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class a_ ( unittest.TestCase ):
@slow
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = AlbertModel.from_pretrained("""albert-base-v2""" )
UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCAmelCase__ )
UpperCamelCase = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
| 321
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
    _backends = ['''keras_nlp''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""keras_nlp"""])
| 100
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """simple docstring"""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        """simple docstring"""
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    """simple docstring"""

    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('f4')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
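# Editor's note on the loss above: the model emits start/end span logits plus a
# 5-way answer-category head, and training simply averages the three
# cross-entropies with equal weight, a design choice of this script rather than
# a requirement of BigBird itself.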
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class __A :
"""simple docstring"""
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        """simple docstring"""
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch
    def collate_fn(self, features):
        """simple docstring"""
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        batch = {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids):
        """simple docstring"""
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs(self, input_ids):
        """simple docstring"""
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
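# Note: iterating len(dataset) // batch_size drops any trailing partial batch,
# which keeps array shapes static for pmap/shard at the cost of skipping a few
# examples per epoch.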
@partial(jax.pmap ,axis_name='batch' )
def A ( snake_case_ ,snake_case_ ,**snake_case_ ) -> Union[str, Any]:
def loss_fn(snake_case_ ):
__UpperCamelCase : Any =model_inputs.pop('start_labels' )
__UpperCamelCase : Dict =model_inputs.pop('end_labels' )
__UpperCamelCase : Union[str, Any] =model_inputs.pop('pooled_labels' )
__UpperCamelCase : int =state.apply_fn(**__lowerCamelCase ,params=__lowerCamelCase ,dropout_rng=__lowerCamelCase ,train=__lowerCamelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] =outputs
return state.loss_fn(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,)
__UpperCamelCase , __UpperCamelCase : Optional[Any] =jax.random.split(__lowerCamelCase )
__UpperCamelCase : Tuple =jax.value_and_grad(__lowerCamelCase )
__UpperCamelCase , __UpperCamelCase : Union[str, Any] =grad_fn(state.params )
__UpperCamelCase : List[str] =jax.lax.pmean({'loss': loss} ,axis_name='batch' )
__UpperCamelCase : List[Any] =jax.lax.pmean(__lowerCamelCase ,'batch' )
__UpperCamelCase : Optional[int] =state.apply_gradients(grads=__lowerCamelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name='batch' )
def A ( snake_case_ ,**snake_case_ ) -> Any:
__UpperCamelCase : Optional[Any] =model_inputs.pop('start_labels' )
__UpperCamelCase : Any =model_inputs.pop('end_labels' )
__UpperCamelCase : List[Any] =model_inputs.pop('pooled_labels' )
__UpperCamelCase : List[str] =state.apply_fn(**__lowerCamelCase ,params=state.params ,train=__lowerCamelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple =outputs
__UpperCamelCase : int =state.loss_fn(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
__UpperCamelCase : List[Any] =jax.lax.pmean({'loss': loss} ,axis_name='batch' )
return metrics
class TrainState(train_state.TrainState):
    """simple docstring"""

    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    """simple docstring"""

    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
"""simple docstring"""
__UpperCamelCase : Dict =model.params
__UpperCamelCase : Union[str, Any] =TrainState.create(
apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , loss_fn=_lowerCamelCase , )
if ckpt_dir is not None:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] =restore_checkpoint(_lowerCamelCase , _lowerCamelCase )
__UpperCamelCase : int ={
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
__UpperCamelCase , __UpperCamelCase : Optional[Any] =build_tx(**_lowerCamelCase )
__UpperCamelCase : Optional[int] =train_state.TrainState(
step=_lowerCamelCase , apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , opt_state=_lowerCamelCase , )
__UpperCamelCase : List[str] =args
__UpperCamelCase : Union[str, Any] =data_collator
__UpperCamelCase : List[Any] =lr
__UpperCamelCase : List[str] =params
__UpperCamelCase : int =jax_utils.replicate(_lowerCamelCase )
return state
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self.args
__UpperCamelCase : int =len(_lowerCamelCase ) // args.batch_size
__UpperCamelCase : List[str] =jax.random.PRNGKey(0 )
__UpperCamelCase : List[Any] =jax.random.split(_lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
__UpperCamelCase : Optional[Any] =jnp.array(0 , dtype=jnp.floataa )
__UpperCamelCase : Optional[int] =get_batched_dataset(_lowerCamelCase , args.batch_size , seed=_lowerCamelCase )
__UpperCamelCase : Optional[Any] =0
for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc=f'Running EPOCH-{epoch}' ):
__UpperCamelCase : Union[str, Any] =self.data_collator(_lowerCamelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : str =self.train_step_fn(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
__UpperCamelCase : List[str] =jax_utils.unreplicate(state.step )
__UpperCamelCase : Union[str, Any] =running_loss.item() / i
__UpperCamelCase : str =self.scheduler_fn(state_step - 1 )
__UpperCamelCase : Union[str, Any] =self.evaluate(_lowerCamelCase , _lowerCamelCase )
__UpperCamelCase : Any ={
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(_lowerCamelCase ) )
self.logger.log(_lowerCamelCase , commit=_lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=_lowerCamelCase )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : int =get_batched_dataset(_lowerCamelCase , self.args.batch_size )
__UpperCamelCase : List[Any] =len(_lowerCamelCase ) // self.args.batch_size
__UpperCamelCase : Tuple =jnp.array(0 , dtype=jnp.floataa )
__UpperCamelCase : Optional[int] =0
for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc='Evaluating ... ' ):
__UpperCamelCase : List[Any] =self.data_collator(_lowerCamelCase )
__UpperCamelCase : Any =self.val_step_fn(_lowerCamelCase , **_lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] =jax_utils.unreplicate(_lowerCamelCase )
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ' )
self.model_save_fn(_lowerCamelCase , params=state.params )
with open(os.path.join(_lowerCamelCase , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_lowerCamelCase , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(_lowerCamelCase , 'data_collator.joblib' ) )
with open(os.path.join(_lowerCamelCase , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , _lowerCamelCase )
print('DONE' )
def restore_checkpoint(save_dir, state):
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))
    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']
    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
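# Shape of the schedule built above: a linear warmup from init_lr to lr over
# warmup_steps, followed by a linear decay toward ~0 (1e-7) for the remaining
# num_train_steps - warmup_steps steps.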
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales, keyed on the
        # flattened parameter path (the key tuple), not the value
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 368
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ :int = logging.get_logger(__name__)
A_ :List[str] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
"""simple docstring"""
UpperCamelCase__ : List[Any] ="""xlm-roberta-xl"""
    def __init__( self , vocab_size=250880 , hidden_size=2560 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=10240 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__UpperCamelCase : int ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCamelCase : Optional[Any] ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
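# Minimal usage sketch (editor's addition; a sketch, not part of the library):
#   config = XLMRobertaXLConfig()
#   onnx_config = XLMRobertaXLOnnxConfig(config)
#   onnx_config.inputs  # {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {...}}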
| 245
| 0
|
import math
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
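# Quick correctness check for small inputs (editor's sketch):
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]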
| 280
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys) , 1002 )
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols) )
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
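# Standalone usage sketch outside the test harness (hedged; requires network
# access to download the pretrained sentencepiece model used above):
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       'google/bert_for_seq_generation_L-24_bbc_encoder')
#   tokenizer.encode('Hello World!')  # -> [18536, 2260, 101]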
| 280
| 1
|
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias']
    new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias']
    new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight']
    new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias']
    new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
        if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.weight')
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.bias')
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
        ]
        if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.bias'
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str, ) -> None:
    # Only support V1
    r = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml')
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors'):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework='pt', device='cpu') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted model.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
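# Example invocation (script name and paths are placeholders):
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./checkpoints/vae.pt --dump_path ./vae-diffusers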
| 190
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
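# Hedged usage sketch (assumes torch, which this module does not import):
if __name__ == "__main__":
    import torch

    fake_decoder_output = torch.rand(1, 3, 64, 64) * 2 - 1  # values in [-1, 1]
    pil_images = pt_to_pil(fake_decoder_output)
    print(pil_images[0].size)  # -> (64, 64)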
| 190
| 1
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class a ( unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['''src''']
        tgt_sentences = bleu_data[pair]['''tgt''']
        batch = tokenizer(src_sentences, return_tensors='''pt''', truncation=True, padding='''longest''').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['''bleu'''], min_bleu_score)
| 56
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    '''simple docstring'''
    x = re.sub('''<n>''', '''''', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
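# Illustrative call (hedged; downloads the punkt tokenizer on first use):
#   add_newline_to_end_of_each_sentence("Hello there. <n>General Kenobi.")
#   -> "Hello there.\nGeneral Kenobi."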
| 56
| 1
|
"""simple docstring"""
import torch
def main() -> None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 366
|
def solution(n: int = 100) -> int:
    """simple docstring"""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
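    # Sanity check from the problem statement (illustrative): for n = 10 the
    # difference is 3025 - 385 = 2640.
    assert solution(10) == 2640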
| 60
| 0
|
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
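    # Sanity check (illustrative): R(6) = 111111 is divisible by 7, so
    # least_divisible_repunit(7) == 6.
    assert least_divisible_repunit(7) == 6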
| 338
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, '''IN_MEMORY_MAX_SIZE''', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
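# Run with pytest; the two parametrize decorators above expand into 3 x 4 = 12
# independent test cases (an illustrative note, not part of the test module).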
| 3
| 0
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """simple docstring"""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"""{solution() = }""")
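    # Sanity check (illustrative): 145 = 1! + 4! + 5! is one of the two
    # "digit factorial" numbers the loop above finds (the other is 40585).
    assert sum_of_digit_factorial(145) == 145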
| 234
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    """simple docstring"""
    hf_pointer = hf_model
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith(""".*"""):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(""".*.""")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("""<mask>""", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token})
        tokenizer.add_tokens(["""<ctc_blank>"""])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["""model"""], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
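# Example invocation (script name and paths are placeholders):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5-tts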
| 234
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next = None
    def __str__(self):
        return F'''{self.data}'''
class LinkedStack(Generic[T]):
    def __init__(self):
        self.top = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self):
        return "->".join([str(item) for item in self])
    def __len__(self):
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("""pop from empty stack""")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("""peek from empty stack""")
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
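    # Quick demonstration (hedged example, not part of the original module):
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1" and stack.pop() == 3 and stack.peek() == 2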
| 41
|
import random
def _partition(data: list, pivot):
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
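if __name__ == "__main__":
    # Hedged usage example (added): the k-th smallest element; the median of
    # seven items sits at index 3 of the sorted order.
    assert quick_select([2, 4, 5, 7, 899, 54, 32], 3) == 7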
| 302
| 0
|
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """simple docstring"""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("""Please enter a valid number""")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(""".""")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
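    # (The last call above raises ValueError by design, demonstrating input
    # validation. The while-loop in decimal_to_fraction is Euclid's GCD
    # algorithm, so results come out in lowest terms, e.g. '6.25' -> (25, 4).)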
| 357
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """french fries"""
        output = sd_pipe(**inputs , negative_prompt=negative_prompt )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = [inputs["""prompt"""]] * 2
        image = np.array(inputs["""image"""] ).astype(np.float32 ) / 255.0
        image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
        image = image / 2 + 0.5
        image = image.permute(0 , 3 , 1 , 2 )
        inputs["""image"""] = image.repeat(2 , 1 , 1 , 1 )
        image = sd_pipe(**inputs ).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        '''simple docstring'''
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_latents_input(self):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components )
        pipe.image_processor = VaeImageProcessor(do_resize=False , do_normalize=False )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        out = pipe(**self.get_dummy_inputs_by_type(torch_device , input_image_type="""pt""" ) )[0]
        vae = components["""vae"""]
        inputs = self.get_dummy_inputs_by_type(torch_device , input_image_type="""pt""" )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
        out_latents_inputs = pipe(**inputs )[0]
        max_diff = np.abs(out - out_latents_inputs ).max()
        self.assertLess(max_diff , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        image = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
        inputs = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        '''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        '''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        '''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        '''simple docstring'''
        number_of_steps = 0
        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        '''simple docstring'''
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["""image"""] = inputs["""image"""].resize((504, 504) )
        model_id = """timbrooks/instruct-pix2pix"""
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
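# Minimal hedged usage of the pipeline exercised above (requires a GPU and
# downloads the 'timbrooks/instruct-pix2pix' weights):
#   pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
#       'timbrooks/instruct-pix2pix', safety_checker=None).to('cuda')
#   edited = pipe(prompt='turn him into a cyborg', image=init_image).images[0]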
| 136
| 0
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text, pattern):
        """simple docstring"""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char):
        """simple docstring"""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos):
        """simple docstring"""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self):
        """simple docstring"""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
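# For the demo above the heuristic checks every alignment and records full
# matches, so the expected output is "Pattern found in following positions:"
# followed by [0, 3].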
| 253
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b) -> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(input_a, input_b ) ) )
def similarity_search(dataset, value_array) -> list[list[list[float] | float]]:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape')
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a, input_b) -> float:
    '''simple docstring'''
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
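    # Illustrative query (hedged, not part of the original tests): the nearest
    # neighbour of [0, 1] in np.array([[0, 0], [1, 1]]) is [0, 0] at distance
    # 1.0, i.e. similarity_search returns [[[0, 0], 1.0]].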
| 138
| 0
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted):
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 358
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
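    # Illustrative expected result for the default signals above:
    # [2, 1, 2, -1] convolved circularly with [1, 2, 3, 4] is [10.0, 10.0, 6.0, 14.0]
    print(CircularConvolution().circular_convolution())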
| 244
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : Dict = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : Optional[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : Optional[int] = image_processor(snake_case__ , return_tensors="np" )
snake_case : List[Any] = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Union[str, Any] = "lower newer"
snake_case : int = processor(text=snake_case__ )
snake_case : List[str] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : str = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : Dict = processor.batch_decode(snake_case__ )
snake_case : str = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Optional[int] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = "lower newer"
snake_case : int = self.prepare_image_inputs()
snake_case : Dict = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 59
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__lowerCamelCase = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Loads an ONNX inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
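# For context, a minimal sketch of the raw onnxruntime calls the wrapper above
# delegates to ("model.onnx" is a hypothetical placeholder path):
#
#     session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
#     input_name = session.get_inputs()[0].name
#     outputs = session.run(None, {input_name: np.zeros((1, 3), dtype=np.float32)})  # None -> all outputs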
| 59
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
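    # The skfuzzy helpers reduce to elementwise max/min on membership grades;
    # a library-free sketch of the first three operations on toy values:
    mu_a = np.array([0.0, 0.5, 1.0, 0.5])
    mu_b = np.array([0.25, 0.25, 0.25, 0.25])
    print(np.maximum(mu_a, mu_b))  # union:        [0.25 0.5  1.   0.5 ]
    print(np.minimum(mu_a, mu_b))  # intersection: [0.   0.25 0.25 0.25]
    print(1 - mu_a)                # complement:   [1.   0.5  0.   0.5 ]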
| 366
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : int=18 , lowerCAmelCase : int=30 , lowerCAmelCase : Optional[int]=4_00 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=None , lowerCAmelCase : List[str]=True , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=True , ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 20}
__lowerCAmelCase : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__lowerCAmelCase : str = parent
__lowerCAmelCase : List[str] = batch_size
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : List[str] = image_size
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[str] = max_resolution
__lowerCAmelCase : List[Any] = do_resize
__lowerCAmelCase : Optional[int] = size
__lowerCAmelCase : List[Any] = do_center_crop
__lowerCAmelCase : Optional[Any] = crop_size
__lowerCAmelCase : int = do_flip_channel_order
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[str] =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_flip_channel_order""" ) )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
__lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : str = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
__lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 139
| 0
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Dict = logging.get_logger(__name__)
def _lowercase ( __snake_case ) -> List[Any]:
__lowerCAmelCase : str = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
__lowerCAmelCase : str = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" ,__snake_case )
if matches:
__lowerCAmelCase : List[Any] = float(matches[1] )
__lowerCAmelCase : Tuple = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__lowerCAmelCase : Optional[int] = 1_001
__lowerCAmelCase : str = "imagenet-1k-id2label.json"
__lowerCAmelCase : Optional[Any] = "huggingface/label-files"
__lowerCAmelCase : Dict = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type="dataset" ) ,"r" ) )
__lowerCAmelCase : List[str] = {int(__snake_case ) + 1: v for k, v in idalabel.items()}
__lowerCAmelCase : str = "background"
__lowerCAmelCase : int = idalabel
__lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
return config
def _lowercase ( ) -> List[Any]:
__lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase : Optional[Any] = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
return im
@torch.no_grad()
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case=False ) -> List[str]:
__lowerCAmelCase : List[Any] = get_mobilenet_va_config(__snake_case )
# Load 🤗 model
__lowerCAmelCase : Tuple = MobileNetVaForImageClassification(__snake_case ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(__snake_case ,__snake_case ,__snake_case )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__lowerCAmelCase : Any = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} ,size={"shortest_edge": config.image_size + 32} ,)
__lowerCAmelCase : Tuple = image_processor(images=prepare_img() ,return_tensors="pt" )
__lowerCAmelCase : Any = model(**__snake_case )
__lowerCAmelCase : List[Any] = outputs.logits
assert logits.shape == (1, 1_001)
if model_name == "mobilenet_v1_1.0_224":
__lowerCAmelCase : Tuple = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
__lowerCAmelCase : Any = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
__lowerCAmelCase : Union[str, Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] ,__snake_case ,atol=1e-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
__lowerCAmelCase : str = "google/" + model_name
image_processor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__snake_case : Tuple = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
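    # Hypothetical invocation (script name and checkpoint path are placeholders):
    #   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf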
| 269
|
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
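    # Worked illustration of the log10 trick on small numbers:
    # 2**11 = 2048 vs 3**7 = 2187; comparing x * log10(a) gives the same ordering
    # without computing the powers: 11 * log10(2) = 3.311... < 7 * log10(3) = 3.339...
    assert 11 * log10(2) < 7 * log10(3)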
| 269
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = 'gpt_neo'
UpperCAmelCase_ = ['past_key_values']
UpperCAmelCase_ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Tuple , lowercase__ : Optional[int]=50_257 , lowercase__ : int=2_048 , lowercase__ : Optional[Any]=2_048 , lowercase__ : Optional[int]=24 , lowercase__ : str=[[["global", "local"], 12]] , lowercase__ : Any=16 , lowercase__ : Tuple=None , lowercase__ : List[Any]=256 , lowercase__ : List[str]="gelu_new" , lowercase__ : Optional[int]=0.0 , lowercase__ : int=0.0 , lowercase__ : List[str]=0.0 , lowercase__ : List[str]=0.1 , lowercase__ : Union[str, Any]=1e-5 , lowercase__ : int=0.02 , lowercase__ : Optional[Any]=True , lowercase__ : Optional[Any]=50_256 , lowercase__ : Any=50_256 , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = window_size
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = resid_dropout
lowerCAmelCase__ = embed_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = classifier_dropout
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = attention_types
lowerCAmelCase__ = self.expand_attention_types_params(lowercase__)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.')
super().__init__(bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
@staticmethod
def __snake_case ( lowercase__ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute block length and number of blocks, kept ONNX-exportable."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction='inputs')
lowerCAmelCase__ = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowerCAmelCase__ = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __snake_case ( self : Optional[int]):
'''simple docstring'''
return self._config.num_heads
def __snake_case ( self : Optional[int] , lowercase__ : PreTrainedTokenizer , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : bool = False , lowercase__ : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCAmelCase__ = super(lowercase__ , self).generate_dummy_inputs(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__)
# We need to order the input in the way they appears in the forward()
lowerCAmelCase__ = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
lowerCAmelCase__ , lowerCAmelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCAmelCase__ = seqlen + 2
lowerCAmelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase__ = [
(torch.zeros(lowercase__), torch.zeros(lowercase__)) for _ in range(self.num_layers)
]
lowerCAmelCase__ = common_inputs['attention_mask']
if self.use_past:
lowerCAmelCase__ = ordered_inputs['attention_mask'].dtype
lowerCAmelCase__ = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__)] , dim=1)
return ordered_inputs
@property
def __snake_case ( self : List[Any]):
'''simple docstring'''
return 13
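# custom_unfold above mirrors torch.Tensor.unfold; a quick illustration of the
# sliding-window semantics it reproduces (windows of size 2, stride 2):
#
#     x = torch.arange(6)      # tensor([0, 1, 2, 3, 4, 5])
#     x.unfold(0, 2, 2)        # tensor([[0, 1], [2, 3], [4, 5]])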
| 119
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(' ', '')
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (rotora, rotora, rotora) , lowerCAmelCase__ = "" , ):
lowerCAmelCase__ = text.upper()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _validator(
lowerCAmelCase__ , lowerCAmelCase__ , plugb.upper() )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = rotor_position
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCAmelCase__ = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCAmelCase__ = plugboard[symbol]
# rotor ra --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
lowerCAmelCase__ = abc.index(lowerCAmelCase__ ) + rotorposa
lowerCAmelCase__ = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCAmelCase__ = reflector[symbol]
# 2nd rotors
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
lowerCAmelCase__ = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCAmelCase__ = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
lowerCAmelCase__ = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)  # rotor choice reconstructed; any three distinct rotors work
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
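    # The plugboard is just a symmetric substitution; a minimal sketch of
    # building one from explicit letter pairs:
    demo_pairs = ["AB", "CD"]
    demo_plugboard = {}
    for a, b in demo_pairs:  # each two-letter string unpacks into a pair
        demo_plugboard[a] = b
        demo_plugboard[b] = a
    print(demo_plugboard)  # {'A': 'B', 'B': 'A', 'C': 'D', 'D': 'C'}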
| 119
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def snake_case( __magic_name__ ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F"""{test_file} instead.""" )
lowercase : Dict = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
lowercase : str = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowercase : Dict = '''.'''.join(__magic_name__ )
return test_module_path
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Union[str, Any] = get_module_path(__magic_name__ )
lowercase : Tuple = importlib.import_module(__magic_name__ )
return test_module
def snake_case( __magic_name__ ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] = []
lowercase : Any = get_test_module(__magic_name__ )
for attr in dir(__magic_name__ ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__magic_name__ , __magic_name__ ) )
# sort with class names
return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = []
lowercase : List[str] = get_test_module(__magic_name__ )
for attr in dir(__magic_name__ ):
lowercase : Optional[int] = getattr(__magic_name__ , __magic_name__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowercase : Any = getattr(__magic_name__ , '''all_model_classes''' , [] )
if len(__magic_name__ ) > 0:
test_classes.append(__magic_name__ )
# sort with class names
return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ )
def snake_case( __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] = get_test_classes(__magic_name__ )
lowercase : List[str] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ )
def snake_case( __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : Optional[int] = test_class()
if hasattr(__magic_name__ , '''setUp''' ):
test.setUp()
lowercase : Dict = None
if hasattr(__magic_name__ , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowercase : Optional[Any] = test.model_tester.__class__
return model_tester
def snake_case( __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
lowercase : List[str] = get_test_classes(__magic_name__ )
lowercase : Tuple = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__magic_name__ )
# sort with class names
return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ )
def snake_case( __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = get_test_classes_for_model(__magic_name__ , __magic_name__ )
lowercase : Dict = []
for test_class in test_classes:
lowercase : Any = get_model_tester_from_test_class(__magic_name__ )
if tester_class is not None:
tester_classes.append(__magic_name__ )
# sort with class names
return sorted(__magic_name__ , key=lambda __magic_name__ : x.__name__ )
def snake_case( __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] = get_test_classes(__magic_name__ )
lowercase : Any = {test_class: get_model_tester_from_test_class(__magic_name__ ) for test_class in test_classes}
return test_tester_mapping
def snake_case( __magic_name__ ) -> Tuple:
'''simple docstring'''
lowercase : Tuple = get_model_classes(__magic_name__ )
lowercase : int = {
model_class: get_test_classes_for_model(__magic_name__ , __magic_name__ ) for model_class in model_classes
}
return model_test_mapping
def snake_case( __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = get_model_classes(__magic_name__ )
lowercase : Tuple = {
model_class: get_tester_classes_for_model(__magic_name__ , __magic_name__ ) for model_class in model_classes
}
return model_to_tester_mapping
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
if isinstance(__magic_name__ , __magic_name__ ):
return o
elif isinstance(__magic_name__ , __magic_name__ ):
return o.__name__
elif isinstance(__magic_name__ , (list, tuple) ):
return [to_json(__magic_name__ ) for x in o]
elif isinstance(__magic_name__ , __magic_name__ ):
return {to_json(__magic_name__ ): to_json(__magic_name__ ) for k, v in o.items()}
else:
return o
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 308
| 1
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every way `target` can be constructed by concatenating words of `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
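    # A smaller worked example: both decompositions of "aa" are recovered.
    print(all_construct('aa', ['a', 'aa']))  # [['aa'], ['a', 'a']]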
| 228
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': 1_536,
'''junnyu/roformer_chinese_base''': 1_536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : int = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Tuple = RoFormerTokenizer
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Tuple:
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , lowerCamelCase_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , lowerCamelCase_ ) != strip_accents
):
lowerCAmelCase__ = getattr(lowerCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = pre_tok_class(**lowerCamelCase_ )
lowerCAmelCase__ = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 228
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__a = datasets.utils.logging.get_logger(__name__)
class UpperCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ):
"""simple docstring"""
lowercase = None
lowercase = None
class UpperCAmelCase_ ( folder_based_builder.FolderBasedBuilder ):
"""simple docstring"""
lowercase = datasets.Audio()
lowercase = "audio"
lowercase = AudioFolderConfig
lowercase = 42 # definition at the bottom of the script
lowercase = AudioClassification(audio_column="audio" , label_column="label" )
__a = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
__a = AUDIO_EXTENSIONS
| 35
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenates consecutive (src, tgt) pairs while both sides still fit in max_tokens."""
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / f'''{split}.source''').open("w").write("\n".join(packed_src))
        Path(save_path / f'''{split}.target''').open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
        shutil.copyfile(src_path, save_path / f'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / f'''{split}.target''')


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
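    # Hypothetical invocation (script name, model name, and paths are placeholders):
    #   python pack_dataset.py --tok_name facebook/bart-large-cnn \
    #       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed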
| 170
| 0
|
'''simple docstring'''
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, 'total_steps'):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 363
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ : List[Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104
| 0
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
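    # Added sketch (illustrative, not part of the original script): the three
    # implementations above count ordered sequences and should always agree; for
    # array=[1, 2, 5] and target=5 there are 9 such orderings.
    for fn in (combination_sum_iv, combination_sum_iv_dp_array, combination_sum_iv_bottom_up):
        assert fn(n, array, target) == 9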
| 6
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
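# Added sketch (illustrative, not from the original file): despite the name, this is
# a min-max selection sort; a quick self-check.
def _demo_min_max_sort() -> None:
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]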
if __name__ == "__main__":
__a: Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
__a: Tuple = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 198
| 0
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written
        return written
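# Added sketch (illustrative): round-tripping a Dataset through SQLite with the
# reader/writer above; the table and file names are hypothetical.
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   con = sqlite3.connect("demo.db")
#   SqlDatasetWriter(ds, "my_table", con).write()
#   loaded = SqlDatasetReader("SELECT * FROM my_table", con).read()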
| 80
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
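# Added sketch (illustrative): typical use resolves the right processor class from a
# checkpoint's config; the checkpoint name below is just an example.
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=pil_image, return_tensors="pt")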
| 80
| 1
|
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
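# Added sketch (illustrative): these helpers are normally driven by
# `Accelerator.save_state` / `Accelerator.load_state`; a direct call would look
# roughly like this (the prepared objects and directory are hypothetical):
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")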
| 92
| 0
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
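# Added note (illustrative): the sign bit survives XOR, so operands of opposite sign
# give a negative result, e.g. different_signs(1, -1) is True and different_signs(1, 1) is False.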
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
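# Added sketch (illustrative): constructing a config and reading a field back.
#
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   assert config.visual_embedding_dim == 1024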
| 155
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155
| 1
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
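# Added sketch (illustrative): `post_process_function` maps raw start/end logits back
# to answer strings before metrics are computed; the argument names are hypothetical.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds,
#       eval_examples=raw_eval_examples, post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()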
| 355
|
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
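# Added note (illustrative): the result equals int.bit_length(), e.g.
# get_highest_set_bit_position(10) returns 4 because 10 == 0b1010.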
| 64
| 0
|
def multiplicative_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
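# Added note (illustrative): multiplicative_persistence(217) is 2 (217 -> 14 -> 4) and
# additive_persistence(199) is 3 (199 -> 19 -> 10 -> 1).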
| 39
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
def UpperCAmelCase_ (self ):
UpperCamelCase__ = 10
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps[0]
UpperCamelCase__ = scheduler.timesteps[1]
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ (self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [1_06, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict noise residual
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [39, 30, 12, 1, 0]
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ )
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
| 244
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 343
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
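# Added sketch (illustrative invocation; the checkpoint path is hypothetical):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint/release/mp_rank_00/model_optim_rng.pt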
| 343
| 1
|
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
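# Illustration only (not from the upstream script): the dict comprehension above rewrites
# fastBPE-style vocab entries -- tokens containing "@@" are word prefixes and lose the marker,
# while ordinary tokens (past the first 14 special symbols) get an explicit end-of-word
# "</w>" suffix. A tiny self-contained trace of the same rule:
if __name__ == "__main__":
    toy_vocab = {"<s>": 0, "hel@@": 20, "lo": 21}
    converted = {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
        for s, i in toy_vocab.items()
    }
    print(converted)  # {'<s>': 0, 'hel': 20, 'lo</w>': 21}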
| 208
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
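# Minimal usage sketch (not in the original module): build the default config and inspect the
# dynamic axes the ONNX export would use. `from_model_config` comes from the `OnnxConfig`
# base class in transformers.
if __name__ == "__main__":
    config = XLMRobertaConfig()
    print(config.model_type, config.hidden_size)  # xlm-roberta 768
    onnx_config = XLMRobertaOnnxConfig.from_model_config(config)
    print(dict(onnx_config.inputs))  # {'input_ids': {...}, 'attention_mask': {...}}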
| 208
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase: Union[str, Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters) -> np.ndarray:
    '''simple docstring'''
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
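# Self-contained sketch (not in the original module) of the nearest-cluster quantization that
# `preprocess` applies above: each RGB pixel is mapped to the index of its closest cluster
# centroid under squared euclidean distance. The random palette here is hypothetical.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pixels = rng.uniform(-1, 1, size=(4, 4, 3))  # a tiny "image" in [-1, 1]
    clusters = rng.uniform(-1, 1, size=(8, 3))   # 8 made-up palette entries
    ids = color_quantize(pixels, clusters)       # shape (16,): one cluster id per pixel
    print(ids.shape, int(ids.min()) >= 0, int(ids.max()) < 8)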
| 53
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_UpperCamelCase: Any = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
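# Worked example (illustration only) of the padding arithmetic in `pad` above: with
# pad_size = 8, a side of length 257 gets (257 // 8 + 1) * 8 - 257 = 7 extra pixels so the
# padded side is a multiple of 8. Note that the formula as written adds a full extra block
# of 8 when the side is already divisible.
if __name__ == "__main__":
    size = 8
    for old in (256, 257, 263):
        print(old, (old // size + 1) * size - old)  # 256 -> 8, 257 -> 7, 263 -> 1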
| 53
| 1
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
lowercase__ : Optional[int] = _get_single_answer(UpperCAmelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ : Any = example['''document''']['''tokens''']
lowercase__ : List[Any] = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(UpperCAmelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase__ : Any = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowercase__ : List[str] = example['''document''']['''tokens''']
lowercase__ : Optional[int] = answer['''start_token''']
lowercase__ : Optional[int] = answer['''end_token''']
lowercase__ : Any = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase__ : Union[str, Any] = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
lowercase__ : Any = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
lowercase__ : Any = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
lowercase__ : str = ''' '''.join([old[i] for i in range(len(UpperCAmelCase ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , UpperCAmelCase , end='''\n''' )
print('''Old:''' , UpperCAmelCase , end='''\n\n''' )
return {
"context": " ".join(UpperCAmelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
# overlap will be of doc_stride - q_len
lowercase__ : Tuple = get_context_and_ans(UpperCAmelCase , assertion=UpperCAmelCase )
lowercase__ : List[str] = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase__ : Optional[Any] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
lowercase__ : Any = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ : List[Any] = []
lowercase__ : Any = []
lowercase__ : int = input_ids[:q_len]
lowercase__ : Any = range(UpperCAmelCase , len(UpperCAmelCase ) , max_length - doc_stride )
for i in doc_start_indices:
lowercase__ : Optional[Any] = i + max_length - q_len
lowercase__ : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(UpperCAmelCase ),
"end_token": [-100] * len(UpperCAmelCase ),
"category": category,
},
}
lowercase__ : Optional[Any] = out['''context'''].split()
lowercase__ : List[Any] = splitted_context[answer['''end_token''']]
lowercase__ : int = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=UpperCAmelCase , ).input_ids )
lowercase__ : List[Any] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=UpperCAmelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase__ : int = len(tokenizer(UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase__ : List[Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
lowercase__ : Tuple = answer['''start_token''']
lowercase__ : List[Any] = answer['''end_token''']
if assertion:
lowercase__ : Union[str, Any] = tokenizer.decode(UpperCAmelCase )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , UpperCAmelCase , end='''\n\n''' )
if len(UpperCAmelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase__ : List[str] = input_ids[:q_len]
lowercase__ : Tuple = range(UpperCAmelCase , len(UpperCAmelCase ) , max_length - doc_stride )
lowercase__ : List[str] = []
lowercase__ : Dict = []
lowercase__ : List[Any] = []
lowercase__ : List[Any] = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase__ : Optional[int] = i + max_length - q_len
lowercase__ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase__ : Optional[int] = start_token - i + q_len
lowercase__ : Any = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
lowercase__ : Any = -100
lowercase__ : List[str] = -100
answers_category.append('''null''' )
lowercase__ : Dict = inputs[-1][start_token : end_token + 1]
answers_start_token.append(UpperCAmelCase )
answers_end_token.append(UpperCAmelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(UpperCAmelCase ) )
print('''Old:''' , tokenizer.decode(UpperCAmelCase ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null"-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__a: Optional[Any] = load_dataset("""natural_questions""")
__a: Optional[int] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
__a: int = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
__a: Tuple = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
__a: str = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__a: int = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
__a: int = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
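# Illustration only: the strided windowing used above starts each chunk
# `max_length - doc_stride` tokens after the previous one, so consecutive chunks overlap by
# `doc_stride - q_len` context tokens. A toy trace of the chunk start indices:
if __name__ == "__main__":
    q_len, total_len = 10, 9000
    print(list(range(q_len, total_len, MAX_LENGTH - DOC_STRIDE)))
    # [10, 2058, 4106, 6154, 8202]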
| 198
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: List[str] = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
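# Worked example (illustration only): with the OPENAI_CLIP statistics used above
# (mean ~= 0.4815, std ~= 0.2686 for the red channel), a rescaled pixel value of 0.5 is
# normalized to (0.5 - 0.4815) / 0.2686 ~= 0.069.
if __name__ == "__main__":
    red_mean, red_std = OPENAI_CLIP_MEAN[0], OPENAI_CLIP_STD[0]
    print((0.5 - red_mean) / red_std)  # ~0.069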
| 198
| 1
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
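# Worked example (illustration only) of the response ratio that drives HRRN above:
# ratio = (burst + waiting) / burst. At time 5, a process that arrived at 1 with burst 3 has
# waited 4, so its ratio is (3 + 4) / 3 ~= 2.33; the scheduler picks the ready process with
# the highest such ratio.
if __name__ == "__main__":
    current, arrival, burst = 5, 1, 3
    print((burst + (current - arrival)) / burst)  # 2.333...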
| 14
|
import base64


# NOTE: the dump obfuscated the digits in "baseaa.baaencode", so the exact baseXX variant
# (16/32/64/85) is ambiguous; base85 is assumed here.
def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 14
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Any = logging.get_logger(__name__)
A_ :int = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
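# Minimal usage sketch (not in the original module): build the default config and derive the
# number of patch tokens a ViT-MSN encoder would see for a 224x224 image with 16x16 patches.
if __name__ == "__main__":
    config = ViTMSNConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(config.hidden_size, num_patches)  # 768 196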
| 71
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 71
| 1
|
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
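# Worked check (illustration only): for first_term=1, common_diff=1, num_of_terms=10 the
# formula gives 10 / 2 * (2 * 1 + 9 * 1) = 5 * 11 = 55, the 10th triangular number.
if __name__ == "__main__":
    assert sum_of_series(1, 1, 10) == 55.0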
| 196
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
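# Usage sketch (not in the original file): slowsort sorts the list in place and returns None.
if __name__ == "__main__":
    data = [6, 2, 9, 4, 1]
    slowsort(data)
    print(data)  # [1, 2, 4, 6, 9]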
| 196
| 1
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
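# Hand-checked example (illustration only): c = -1 stays bounded under z -> z*z + c (the
# orbit cycles 0, -1, 0, -1, ...), so get_distance(-1, 0, 50) returns 1.0 and both coloring
# functions map that point to black.
if __name__ == "__main__":
    print(get_distance(-1, 0, 50))  # 1.0
    print(get_black_and_white_rgb(get_distance(-1, 0, 50)))  # (0, 0, 0)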
| 163
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104
| 0
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data):
        '''simple docstring'''
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        '''simple docstring'''
        return f'Node({self.data})'


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index, data) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def __a ( ) ->None:
a__: Optional[int] = LinkedList()
assert linked_list.is_empty() is True
assert str(_SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(_SCREAMING_SNAKE_CASE , i + 1 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_SCREAMING_SNAKE_CASE ) == 9
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
a__: Dict = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def __a ( ) ->None:
a__: int = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
a__: List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
a__: Union[str, Any] = linked_list.delete_head()
assert result == -9
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
a__: List[str] = linked_list.delete_tail()
assert result == 12.2
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
a__: Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __a ( ) ->Union[str, Any]:
from doctest import testmod
testmod()
a__: Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_SCREAMING_SNAKE_CASE )
print('\nReading/changing Node data using indexing:' )
print(F'Element at Position 1: {linked_list[1]}' )
a__: Any = input('Enter New Value: ' ).strip()
print('New list:' )
print(_SCREAMING_SNAKE_CASE )
print(F'length of linked_list is : {len(_SCREAMING_SNAKE_CASE )}' )
if __name__ == "__main__":
main()
| 353
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__: Optional[Any] = bool(qa['answers']['text'] )
return qid_to_has_ans
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
def remove_articles(_SCREAMING_SNAKE_CASE ):
return ARTICLES_REGEX.sub(' ' , _SCREAMING_SNAKE_CASE )
def white_space_fix(_SCREAMING_SNAKE_CASE ):
return " ".join(text.split() )
def remove_punc(_SCREAMING_SNAKE_CASE ):
a__: Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_SCREAMING_SNAKE_CASE ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) )
def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[int]:
if not s:
return []
return normalize_answer(_SCREAMING_SNAKE_CASE ).split()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: Any = get_tokens(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = get_tokens(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE )
a__: Tuple = sum(common.values() )
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a__: Any = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE )
a__: Dict = (2 * precision * recall) / (precision + recall)
return fa
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
a__: Union[str, Any] = {}
a__: Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__: Optional[int] = qa['id']
a__: List[Any] = [t for t in qa['answers']['text'] if normalize_answer(_SCREAMING_SNAKE_CASE )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
a__: str = ['']
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
a__: Any = preds[qid]
# Take max over all gold answers
a__: List[str] = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
a__: Optional[int] = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers )
return exact_scores, fa_scores
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: List[str] = {}
for qid, s in scores.items():
a__: List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
a__: Optional[int] = float(not qid_to_has_ans[qid] )
else:
a__: Optional[Any] = s
return new_scores
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Tuple:
if not qid_list:
a__: str = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
a__: Optional[Any] = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
for k in new_eval:
a__: List[Any] = new_eval[k]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='b' , alpha=0.2 , where='post' )
plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_SCREAMING_SNAKE_CASE )
plt.savefig(_SCREAMING_SNAKE_CASE )
plt.clf()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: Optional[int] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
a__: Dict = 0.0
a__: Optional[int] = 1.0
a__: Tuple = 0.0
a__: Tuple = [1.0]
a__: Optional[Any] = [0.0]
a__: Optional[Any] = 0.0
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a__: Optional[Any] = true_pos / float(i + 1 )
a__: int = true_pos / float(_SCREAMING_SNAKE_CASE )
if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_SCREAMING_SNAKE_CASE )
recalls.append(_SCREAMING_SNAKE_CASE )
if out_image:
plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {"ap": 100.0 * avg_prec}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
a__: Any = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a__: Optional[Any] = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
a__: List[str] = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
a__: Optional[Any] = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()}
a__: List[Any] = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'pr_exact' )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'pr_f1' )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'pr_oracle' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
if not qid_list:
return
a__: Any = [na_probs[k] for k in qid_list]
a__: List[str] = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) )
plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , F'na_prob_hist_{name}.png' ) )
plt.clf()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__: str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a__: List[Any] = num_no_ans
a__: Union[str, Any] = cur_score
a__: Optional[Any] = 0.0
a__: str = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a__: Tuple = scores[qid]
else:
if preds[qid]:
a__: Optional[Any] = -1
else:
a__: Optional[int] = 0
cur_score += diff
if cur_score > best_score:
a__: Dict = cur_score
a__: Optional[int] = na_probs[qid]
return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__ , a__: str = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__ , a__: Optional[int] = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[Any] = best_exact
a__: Dict = exact_thresh
a__: Optional[int] = best_fa
a__: str = fa_thresh
def main():
with open(OPTS.data_file ) as f:
a__: Tuple = json.load(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = dataset_json['data']
with open(OPTS.pred_file ) as f:
a__: Dict = json.load(_SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a__: Dict = json.load(_SCREAMING_SNAKE_CASE )
else:
a__: Optional[Any] = {k: 0.0 for k in preds}
a__: List[Any] = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False
a__: Optional[int] = [k for k, v in qid_to_has_ans.items() if v]
a__: Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
a__ , a__: Optional[Any] = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
a__: Dict = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
a__: str = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_ans_qids:
a__: List[str] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'HasAns' )
if no_ans_qids:
a__: Optional[Any] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
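# Example invocation (hypothetical file names; assumes the argument parser defined
# above exposes the standard SQuAD 2.0 evaluation flags):
#
#     python evaluate-v2.0.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-image-dir eval_plots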
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
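# Usage sketch (hypothetical environment variables; added for illustration):
#
#     os.environ["ACCELERATE_DEBUG_MODE"] = "1"
#     parse_flag_from_env("ACCELERATE_DEBUG_MODE")             # -> True
#     get_int_from_env(["LOCAL_WORLD_SIZE", "WORLD_SIZE"], 1)  # first non-negative hit, else 1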
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead sort (gravity sort)."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Let the excess "beads" fall from the upper rod to the lower one.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
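    # Extra sanity checks (added for illustration; not in the original file):
    assert bead_sort([0, 2, 2, 1]) == [0, 1, 2, 2]
    assert bead_sort([5]) == [5]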
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
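# Usage sketch (assumes the `transformers` package; added for illustration):
#
#     from transformers import DPTConfig
#     config = DPTConfig(is_hybrid=True)   # logs "Initializing the config with a `BiT` backbone."
#     assert config.backbone_config.layer_type == "bottleneck"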
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
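# Usage sketch (assumes the `transformers` package; added for illustration):
#
#     from transformers import WhisperConfig, WhisperModel
#     config = WhisperConfig()        # defaults give a small model (d_model=256)
#     model = WhisperModel(config)    # randomly initialized model from the config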