# Flattened dataset dump. Row schema: code (string) | code_codestyle (int) |
# style_context (string) | style_context_codestyle (int) | label (int).
# The integer cells are kept below as "# ----" marker lines between the code cells.
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
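
# Illustrative usage (added sketch; not part of the original file):
#     config = Data2VecVisionConfig()
#     assert config.pool_scales == [1, 2, 3, 6] and config.image_size == 224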
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
# ---- code_codestyle: 163 ----
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
# ---- style_context_codestyle: 163 | label: 1 ----
from __future__ import annotations


def all_unique(input_list: list[int]) -> bool:
    """simple docstring"""
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
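    # Illustrative checks (added; not in the original file):
    assert all_unique([1, 2, 3])
    assert not all_unique([1, 2, 2])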
# ---- code_codestyle: 87 ----
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    """simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
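# Usage note (added): the script reads a GitHub token from the GITHUB_TOKEN
# environment variable, e.g. `GITHUB_TOKEN=<token> python scripts/stale.py`
# (the script path here is a placeholder, not taken from the original file).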
# ---- style_context_codestyle: 87 | label: 1 ----
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
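
# Illustrative usage (added sketch; requires network access to the Hub):
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     ids = tok("Hello world").input_ids
#     tok.decode(ids)  # '<s>Hello world</s>'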
# ---- code_codestyle: 165 ----
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
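
# Equivalent high-level API (added sketch; assumes Hub access):
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
#     classifier(image)  # list of {label, score} dicts over the 16 RVL-CDIP classes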
# ---- style_context_codestyle: 165 | label: 1 ----
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
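
# How zero-shot audio classification scores labels (added sketch): CLAP embeds
# the audio clip and every candidate label, then softmaxes their similarities,
# roughly `probs = softmax(audio_emb @ text_embs.T / temperature)`; the labels
# are free-form strings, which is what makes the task "zero-shot".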
# ---- code_codestyle: 358 ----
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
# ---- style_context_codestyle: 223 | label: 0 ----
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
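
# Worked example (added): odd_even_sort([5, 3, 1, 2, 4]) returns [1, 2, 3, 4, 5];
# the even-index and odd-index passes repeat until a full sweep makes no swap.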
# ---- code_codestyle: 98 ----
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    '''simple docstring'''

    def _no_encoding_on_file_open(self, filepath: str):
        """simple docstring"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """simple docstring"""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            filtered_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return filtered_matches[0] if filtered_matches else None

    def test_no_encoding_on_file_open(self):
        """simple docstring"""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """simple docstring"""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
# ---- style_context_codestyle: 201 | label: 0 ----
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
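
# Worked example (added): rank_of_matrix([[1, 2], [2, 4]]) == 1, since the
# second row is a multiple of the first and is eliminated to zeros.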
if __name__ == "__main__":
import doctest
doctest.testmod()
# ---- code_codestyle: 354 ----
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
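
# Note (added): the collator pads token-level labels, NER tags, and entity
# spans to a common `sequence_length` so every field in the batch can be
# stacked into a single int64 tensor; `padding_tensor` above does the
# per-field padding.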
# ---- style_context_codestyle: 104 | label: 0 ----
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(datasets: List[DatasetType], probabilities: Optional[List[float]] = None, seed: Optional[int] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted") -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(dsets: List[DatasetType], info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, axis: int = 0) -> DatasetType:
    '''simple docstring'''
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
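
# Usage sketch (added): mixing two map-style datasets with sampling
# probabilities; `Dataset.from_dict` is the standard in-memory constructor.
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)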
# ---- code_codestyle: 328 ----
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
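
# Example invocation (added; the script name and file paths are placeholders):
#     python convert_transfo_xl_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./out \
#         --transfo_xl_dataset_file ./corpus-cached.pkl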
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
# ---- style_context_codestyle: 51 | label: 0 ----
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 5_1_2,
'''squeezebert/squeezebert-mnli''': 5_1_2,
'''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
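
# Note (added): create_token_type_ids_from_sequences implements the standard
# BERT segment-id scheme: 0 for the `[CLS] A [SEP]` tokens and 1 for `B [SEP]`.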
# ---- code_codestyle: 360 ----
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- style_context_codestyle: 74 | label: 0 ----
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        """simple docstring"""
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """simple docstring"""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """simple docstring"""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
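
# Note (added): LDM3D returns an RGB image and an aligned depth map from one
# denoising pass; the mean/std targets above are specific to the "Intel/ldm3d"
# and "Intel/ldm3d-4c" checkpoints.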
# ---- code_codestyle: 347 ----
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
# ---- style_context_codestyle: 347 | label: 1 ----
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """simple docstring"""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
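
# Sanity check (added): pi(10) should return the first ten digits, "3.14159265".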
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
# ---- code_codestyle: 362 ----
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
# ---- style_context_codestyle: 153 | label: 0 ----
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048,
        feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400,
        padding_value=0.0, return_attention_mask=False, **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        # Power spectrogram -> mel filter bank -> log10; the result is clamped to an
        # 8-dB dynamic range below the peak and rescaled to roughly [-1, 1].
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
        return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
        do_normalize=None, **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 272 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # Make remote-code confirmation prompts time out immediately during tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_config_register(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity of two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
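# Worked example (sets): with set_a = {"a", "b", "c", "d", "e"} and
# set_b = {"c", "d", "e", "f", "h", "i"}, the intersection has 3 elements and the
# union has 8, so jaccard_similarity returns 3 / 8 = 0.375.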
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b)) | 210 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
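# The generator above yields 1, 1, 2, 3, ... starting from the *second* Fibonacci
# number, so `answer` counts terms from F(2) onwards; the final `+ 1` converts that
# count back to the conventional 1-based index of the first term with n digits.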
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 291 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
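# Note: timm stores the attention projections as one fused `qkv` matrix of shape
# (3 * hidden_size, hidden_size); the three slices above peel off the query, key and
# value weights (and biases) in that order for the HuggingFace layout.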
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError('Unknown model type')

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ['itm_score.fc.weight', 'itm_score.fc.bias']
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        image2 = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg', stream=True).raw)
        text = (
            'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
            ' standing.'
        )
        encoding_1 = processor(image1, text, return_tensors='pt')
        encoding_2 = processor(image2, text, return_tensors='pt')
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
        if mlm_model:
            text = 'a bunch of [MASK] laying on a [MASK].'
        else:
            text = 'How many cats are there?'
        encoding = processor(image, text, return_tensors='pt')
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 205 |
"""simple docstring"""
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        candidate = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            # while/else: runs only when the loop finished without `break`,
            # i.e. no palindrome appeared within 50 iterations.
            lychrel_nums.append(candidate)
    return len(lychrel_nums)
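# Quick sanity check: 47 is not a Lychrel candidate, since 47 + 74 = 121 is already a
# palindrome after one iteration; numbers like 196 never produce a palindrome within
# the 50-iteration cap and are therefore counted.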
if __name__ == "__main__":
print(F"""{solution() = }""")
| 205 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__lowercase : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
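        # The style rewrite bound both reference lists below to the same placeholder name;
        # capture the words list here, before the boxes assignment shadows it.
        expected_words = __lowercase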
__lowercase : List[str] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
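        expected_boxes = __lowercase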
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 249 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
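# A minimal usage sketch (the file path is hypothetical; `PipelineTool` instances are
# callable, which runs encode -> forward -> decode in sequence):
#
#     tool = SpeechToTextTool()
#     text = tool("path/to/audio.flac")
#     print(text)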
| 38 | 0 |
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """Return the (1-based) position of the highest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
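# A few sanity checks: 25 is 0b11001, so its highest set bit is at position 5;
# get_highest_set_bit_position(1) == 1 and get_highest_set_bit_position(0) == 0,
# since zero has no set bits.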
if __name__ == "__main__":
import doctest
doctest.testmod()
| 132 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho, or None on failure."""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        """Return the pseudorandom value f(value) = (value**2 + step) % modulus."""
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
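# Example: pollard_rho(221) typically returns 13 or 17 (221 = 13 * 17), though which
# factor is found -- and whether one is found at all -- depends on the seed and step,
# which is why the caller can retry with several attempts.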
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 132 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class snake_case_ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Any=7 , lowercase_ : List[str]=4_00 , lowercase_ : int=20_00 , lowercase_ : Union[str, Any]=10 , lowercase_ : Tuple=1_60 , lowercase_ : Dict=8 , lowercase_ : Dict=0.0 , lowercase_ : List[Any]=40_00 , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=True , ) -> List[Any]:
lowercase__ : Any = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Dict = min_seq_length
lowercase__ : Optional[Any] = max_seq_length
lowercase__ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ : Tuple = padding_value
lowercase__ : Optional[Any] = sampling_rate
lowercase__ : Optional[int] = return_attention_mask
lowercase__ : Union[str, Any] = do_normalize
lowercase__ : List[str] = feature_size
lowercase__ : List[str] = chunk_length
lowercase__ : List[Any] = hop_length
def __UpperCamelCase ( self : Dict ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
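

# --- Hedged sketch (added; not part of the original tests) ---
# The zero-mean/unit-variance check above is plain per-example
# standardization; the small epsilon below is an assumption to guard
# against division by zero, not necessarily what the library uses.
def _zero_mean_unit_var(x):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)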
| 87 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an increasing "strand" out of ``arr`` and
    merge it into ``solution``.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
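
    # Extra illustrative checks (added; not in the original file): strand sort
    # handles the empty list, already-sorted input, and the ``reverse`` flag.
    assert strand_sort([]) == []
    assert strand_sort([1, 2, 3]) == [1, 2, 3]
    assert strand_sort([2, 2, 1], reverse=True) == [2, 2, 1]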
| 87 | 1 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Bidirectional bubble sort: alternate right-to-left and left-to-right
    passes until a full sweep makes no swaps.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Right-to-left pass: pull the smallest remaining item down.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Left-to-right pass: push the largest remaining item up.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        # No swaps in either direction means the list is sorted.
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = input("""Enter numbers separated by a comma:\n""").strip()
_a = [int(item) for item in user_input.split(""",""")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 100 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
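

# Hypothetical usage sketch (added): the image file name and the positional
# call convention are assumptions for illustration only.
def _demo_image_qa():
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("example.jpg")
    return tool(image, "What is in the image?")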
| 100 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, specifically
    avoiding whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of (variable-length) symbol strings.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
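

# Quick illustration (added) of the two module-level helpers above; the
# values are small enough to verify by hand.
def _demo_bpe_helpers() -> None:
    assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
    byte_encoder = bytes_to_unicode()
    # All 256 byte values map to distinct printable unicode characters.
    assert len(byte_encoder) == 256
    assert len(set(byte_encoder.values())) == 256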
| 1 |
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """AND outputs 1 only when neither input is 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
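

# Illustrative extension (added; not in the original file): other gates
# compose from ``and_gate`` in the obvious way, e.g. NAND.
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is simply the negation of AND.
    return int(not and_gate(input_1, input_2))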
| 223 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
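

# Minimal sketch (added; a simplified stand-in, not the real ``_LazyModule``)
# of the lazy-import pattern used above: a submodule is imported only when
# one of its exported names is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(symbol)
        # The real import happens here, on first attribute access.
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, symbol)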
| 365 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase_ : str = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task, distributing the work across GPUs via the accelerator."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
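

# Worked micro-example (added) of ``remove_last_block``: the generation is
# split on the EOF markers defined at the top of the file and everything
# from the last marker onward is dropped.
def _demo_remove_last_block() -> None:
    generation = "    return x + 1\n\ndef unwanted():\n    pass"
    assert remove_last_block(generation) == "    return x + 1\n"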
| 223 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
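

# Note (added): the integration test above uses the standard pinned-checkpoint
# pattern - run a fixed input through a released model and compare a small,
# deterministic slice of the output against hard-coded reference values. A
# generic sketch of that pattern (assumes torch is available):
def _assert_close_slice(output, expected_slice, atol=1e-4):
    # A 3x3 window is enough to catch numerical regressions without storing
    # the whole reference tensor in the test file.
    assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=atol)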
| 150 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Forward references to the next nodes, one entry per level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Sample a geometric level: keep flipping a p-biased coin."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key):
        # Nodes with keys smaller than ``key`` that are closest to it, per level.
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
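

# Quick empirical sketch (added): ``random_level`` follows a geometric
# distribution, so with p = 0.5 roughly half of all sampled levels equal 1.
def _demo_level_distribution(trials: int = 10_000) -> float:
    sl = SkipList()
    ones = sum(sl.random_level() == 1 for _ in range(trials))
    return ones / trials  # expect a value close to 0.5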
| 104 | 0 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
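

# Minimal sketch (added; a simplified stand-in, not diffusers' real dummy
# classes): placeholder objects record the backends they need so that using
# them without those backends fails with a helpful message.
class _DummyBackendObject:
    _backends = ["k_diffusion"]

    def __init__(self):
        raise ImportError(f"This object requires: {', '.join(self._backends)}")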
| 249 |
'''simple docstring'''
def apply_table(inp, table):
    """Apply a 1-indexed permutation/selection ``table`` to the bit string ``inp``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Position-wise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up ``data`` in S-box ``s``: outer bits select the row, middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (uses the global ``p4_table``)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 249 | 1 |
"""simple docstring"""
from string import ascii_uppercase
# Forward and reverse lookup tables between letters and 0-25 indices.
char_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: each letter becomes (message - key) mod 26; spaces pass through."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (char_to_index[letter] - char_to_index[key_new[i]]) % 26
            i += 1
            cipher += index_to_char[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Decrypt: each letter becomes (cipher + key) mod 26; spaces pass through."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_index[letter] + char_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
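

# Worked micro-example (added): with message "AB" and key "B" the key is
# padded to "BB"; 'A' encrypts to (0 - 1) % 26 = 25 -> 'Z' and 'B' to 0 -> 'A'.
def _demo_cipher_roundtrip() -> None:
    key_new = generate_key("AB", "B")
    assert key_new == "BB"
    assert cipher_text("AB", key_new) == "ZA"
    assert original_text("ZA", key_new) == "AB"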
| 266 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True
    ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed,
            extra_args_str=extra_args_str, predict_with_generate=predict_with_generate,
            do_train=do_train, do_eval=do_eval, do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
self.run_seqaseq_quick(distributed=A_ )
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
self.run_seqaseq_quick(distributed=A_ )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False )
    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex' )
    @parameterized.expand(['base', 'low', 'high', 'mixed'] )
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data['extra_args_str'] )
        n_matches = len(re.findall(log_info_string, cl.err ) )
        self.assertEqual(n_matches, data['n_matches'] )
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json') ).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings,
            'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB', )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings,
            'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB', )
        self.assertEqual(
            loss_orig, loss_bnb, F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None):
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        '.split()
        args_eval = F'\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        '.split()
        args_predict = '\n            --do_predict\n        '.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            '.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env() )
else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys, 'argv', testargs ):
                main()
return output_dir | 74 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset, batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3 )
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
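# For reference, the idiom the tests below exercise is the standard local-accumulation
# pattern (a sketch, not part of the test code):
#
#     with accelerator.no_sync(ddp_model):
#         accelerator.backward(loss)  # gradients accumulate locally; the DDP all-reduce is skipped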
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input) )]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is True
                ), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad ) is False
                ), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset, batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset, batch_size=16 )
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
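# The assertions above pin down the bookkeeping contract: `gradient_state.active_dataloader`
# always points at the innermost prepared dataloader currently being iterated, and
# `end_of_dataloader` flips to True exactly on its last batch.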
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 182 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='text-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
return task_template
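    # Usage sketch (column names are hypothetical): given
    #     Features({"review": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
    # the method above returns a frozen copy of the template whose `labels` schema is
    # aligned with that ClassLabel, so the label names survive dataset preparation.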
@property
    def column_mapping(self) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
} | 182 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
"""simple docstring"""
super().setUp()
        tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def canine_tokenizer(self):
"""simple docstring"""
return CanineTokenizer.from_pretrained('''google/canine-s''' )
    def get_tokenizer(self, **lowerCAmelCase ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCAmelCase )
lowerCamelCase_ =1_024
return tokenizer
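    # CANINE needs no vocabulary file: it tokenizes at the Unicode code-point level, so the
    # ids for plain text are essentially `[ord(c) for c in text]`, plus special codepoints
    # from the private-use area (the 57_344/57_345 values in the test below are 0xE000/0xE001,
    # the [CLS]/[SEP] markers).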
@require_torch
    def test_prepare_batch_integration(self):
        """simple docstring"""
        tokenizer = self.canine_tokenizer
        src_text = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
        # fmt: off
        expected_src_tokens = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''' )
        self.assertIsInstance(batch, BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens, result )
        self.assertEqual((2, 39), batch.input_ids.shape )
        self.assertEqual((2, 39), batch.attention_mask.shape )
@require_torch
    def test_encoding_keys(self):
        """simple docstring"""
        tokenizer = self.canine_tokenizer
        src_text = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('''input_ids''', batch )
        self.assertIn('''attention_mask''', batch )
        self.assertIn('''token_type_ids''', batch )
@require_torch
    def test_max_length_integration(self):
        """simple docstring"""
        tokenizer = self.canine_tokenizer
        tgt_text = [
            '''What\'s the weater?''',
            '''It\'s about 25 degrees.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='''max_length''', truncation=True, return_tensors='''pt''' )
        self.assertEqual(32, targets['''input_ids'''].shape[1] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowerCamelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ =tempfile.mkdtemp()
lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =after_tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
lowerCamelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ =tempfile.mkdtemp()
lowerCamelCase_ =''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ =chr(0xE_007 )
additional_special_tokens.append(lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =after_tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertIn(lowerCAmelCase, after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowerCamelCase_ =tokenizer.__class__.from_pretrained(lowerCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_, lowerCamelCase_ =self.get_clean_sequence(lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ =0xE_005
lowerCamelCase_ =chr(lowerCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 1 )
lowerCamelCase_ =tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=lowerCAmelCase )
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
self.assertEqual(lowerCAmelCase, input_encoded + special_token_id )
lowerCamelCase_ =tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_ =chr(0xE_005 )
lowerCamelCase_ =chr(0xE_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 1 )
self.assertEqual(len(lowerCAmelCase ), 1 )
self.assertEqual(token_a[0], lowerCAmelCase )
self.assertEqual(token_a[0], lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ =0xE_006
lowerCamelCase_ =chr(lowerCAmelCase )
lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase )
tokenizer.from_pretrained(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase, '''special_tokens_map.json''' ), encoding='''utf-8''' ) as json_file:
lowerCamelCase_ =json.load(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase, '''tokenizer_config.json''' ), encoding='''utf-8''' ) as json_file:
lowerCamelCase_ =json.load(lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ =0xE_006
lowerCamelCase_ =chr(lowerCAmelCase )
lowerCamelCase_ =[new_token_a]
lowerCamelCase_ =[new_token_a]
with open(os.path.join(lowerCAmelCase, '''special_tokens_map.json''' ), '''w''', encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase, lowerCAmelCase )
with open(os.path.join(lowerCAmelCase, '''tokenizer_config.json''' ), '''w''', encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase, lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ =tokenizer_class.from_pretrained(lowerCAmelCase, extra_ids=0 )
self.assertIn(lowerCAmelCase, tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ), )
lowerCamelCase_ =0xE_007
lowerCamelCase_ =chr(lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ =[AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase )]
lowerCamelCase_ =tokenizer_class.from_pretrained(
lowerCAmelCase, additional_special_tokens=lowerCAmelCase, extra_ids=0 )
self.assertIn(lowerCAmelCase, tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_ ='''hello world'''
if self.space_between_special_tokens:
lowerCamelCase_ ='''[CLS] hello world [SEP]'''
else:
lowerCamelCase_ =input
lowerCamelCase_ =tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.decode(lowerCAmelCase, spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase, [output, output.lower()] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase_ =[
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase_ ='''a'''
lowerCamelCase_ =ord(lowerCAmelCase )
for attr in attributes_list:
setattr(lowerCAmelCase, attr + '''_id''', lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase, lowerCAmelCase ), lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase, attr + '''_id''' ), lowerCAmelCase )
setattr(lowerCAmelCase, attr + '''_id''', lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase, lowerCAmelCase ), lowerCAmelCase )
self.assertEqual(getattr(lowerCAmelCase, attr + '''_id''' ), lowerCAmelCase )
setattr(lowerCAmelCase, '''additional_special_tokens_ids''', [] )
self.assertListEqual(getattr(lowerCAmelCase, '''additional_special_tokens''' ), [] )
self.assertListEqual(getattr(lowerCAmelCase, '''additional_special_tokens_ids''' ), [] )
lowerCamelCase_ =0xE_006
lowerCamelCase_ =chr(lowerCAmelCase )
setattr(lowerCAmelCase, '''additional_special_tokens_ids''', [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase, '''additional_special_tokens''' ), [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase, '''additional_special_tokens_ids''' ), [additional_special_token_id] )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
| 75 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0 ):
    """simple docstring"""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val, dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2 )
    elif isinstance(val, torch.Tensor ):
        print(msg, ":", val.size() )
    else:
        print(msg, ":", val )
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size ):
    """simple docstring"""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0, 2 )
        param = param.transpose(1, 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0, 1 ).contiguous()
    param = param.view(*input_shape )
    return param
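# Shape sketch (hypothetical sizes, with num_splits=3 for a fused QKV matrix): under
# checkpoint_version >= 2.0 a weight of shape [num_heads * 3 * head_dim, hidden] is viewed
# as (num_heads, 3, head_dim, hidden), the first two axes are swapped, and the result is
# flattened back to the input shape, so the values are unchanged but the interleaving
# matches what the GPT-2 implementation expects.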
def convert_megatron_checkpoint(args, input_state_dict, config ):
    """simple docstring"""
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
# pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
    megatron_to_transformers = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
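    # GPT-2 in transformers implements its projections as Conv1D modules, whose weights are
    # the transpose of a regular nn.Linear; that is why plain "weight" entries from Megatron
    # are transposed before being stored below.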
# Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = F"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm" ):
            ln_name = "ln_1" if op_name.startswith("input" ) else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16 ) ).view(
                1, 1, n_positions, n_positions )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16 )
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true" )
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
    if args.path_to_checkpoint.endswith(".zip" ):
        with zipfile.ZipFile(args.path_to_checkpoint, "r" ) as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu" )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu" )
    ds_args = input_state_dict.get("args", None )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50_257, n_positions=1_024, n_embd=1_024, n_layer=24, n_head=16, n_inner=4_096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, )
    else:
        config = GPT2Config.from_json_file(args.config_file )
    config.architectures = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config" )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(F"Adding {tokenizer_class} tokenizer files" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin" )
    print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
    torch.save(output_state_dict, output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 153 | 0 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
def hashimage(image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, mask_generator, examples):
        '''simple docstring'''
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
    def test_small_model_tf(self):
'''simple docstring'''
pass
@slow
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge')
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        '''simple docstring'''
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation', model=model_id)
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
] , ) | 363 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
lowercase__ : List[str] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ : Tuple = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ : Optional[Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ : List[Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ : Dict = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ : List[Any] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ : List[str] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ : Dict = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ : Optional[int] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ : Optional[int] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ : int = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ : int = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ : List[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
lowercase__ : str = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowercase__ : int = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('''CompVis'''):
lowercase__ : Optional[Any] = UNet2DModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowercase__ : Tuple = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowercase__ : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowercase__ : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowercase__ : Tuple = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""") | 190 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337 | '''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Compute the built-in voltage of a p-n junction."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 1 | 0 |
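# A quick sanity check of the built-in voltage formula with textbook silicon numbers
# (illustrative doping values, not taken from the source): V_bi = (kT/q) * ln(Nd*Na/ni^2).
from math import log

kT_over_q = 8.617333262e-5 * 300  # Boltzmann constant in eV/K times T, i.e. volts
v_bi = kT_over_q * log((1e17 * 1e17) / (1.5e10) ** 2)  # Nd, Na, ni in cm^-3
print(f"{v_bi:.2f} V")  # ≈ 0.81 V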
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'  # Yahoo's price <span> wrapper class
    return soup.find('div', class_=class_).find('span').text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 362 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return the worldometers headline counters as a {label: value} dict."""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f'''{key}\n{value}\n''')
| 5 | 0 |
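# Both scrapers above depend on site-specific CSS classes, which change without notice.
# A defensive variant of the same BeautifulSoup lookup (standalone, runnable offline):
from bs4 import BeautifulSoup

html = "<div class='maincounter-number'> 42 </div>"
soup = BeautifulSoup(html, "html.parser")
node = soup.find("div", class_="maincounter-number")
print(node.get_text(strip=True) if node else "N/A")  # "42"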
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges are explored first: push to the front of the deque
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 205 |
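# Usage sketch for the 0-1 BFS class above (names as reconstructed). Zero-weight edges
# go to the front of the deque, so the first pop of a vertex already carries its
# shortest distance, giving O(V + E) overall.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)
print(g.get_shortest_path(0, 3))  # 1  (0 -> 1 -> 2 -> 3 costs 0 + 1 + 0)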
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 205 | 1 |
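# Trace of the pair update above on a small input; each step carries the best sums
# including and excluding the current element (names as reconstructed):
#   [2, 7, 9, 3, 1] -> incl=2, excl=0
#   num=7: incl=0+7=7,   excl=max(2, 0)=2
#   num=9: incl=2+9=11,  excl=max(7, 2)=7
#   num=3: incl=7+3=10,  excl=max(11, 7)=11
#   num=1: incl=11+1=12, excl=max(10, 11)=11
assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12  # picks 2 + 9 + 1
assert maximum_non_adjacent_sum([]) == 0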
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class __lowerCAmelCase ( A__ ):
UpperCamelCase = "gpt_neox_japanese"
def __init__( self : Tuple , A : Any=3_20_00 , A : Optional[Any]=25_60 , A : Dict=32 , A : Optional[Any]=32 , A : Union[str, Any]=4 , A : List[Any]="gelu" , A : int=1.0_0 , A : Tuple=1_00_00 , A : str=20_48 , A : Tuple=0.0_2 , A : Union[str, Any]=1E-5 , A : int=True , A : Optional[int]=3_19_96 , A : Tuple=3_19_99 , A : Optional[int]=0.1 , A : Optional[Any]=0.0 , **A : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A)
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_multiple_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = rotary_pct
_UpperCAmelCase = rotary_emb_base
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = hidden_dropout
| 351 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCAmelCase__ = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(A )
class __lowerCAmelCase ( A ):
UpperCamelCase = '''rag'''
UpperCamelCase = True
def __init__( self : str , A : int=None , A : Dict=True , A : int=None , A : Any=None , A : Any=None , A : Any=None , A : int=None , A : str=" / " , A : int=" // " , A : Any=5 , A : Optional[Any]=3_00 , A : Optional[Any]=7_68 , A : Optional[int]=8 , A : Union[str, Any]="wiki_dpr" , A : int="train" , A : Tuple="compressed" , A : str=None , A : Any=None , A : Tuple=False , A : List[str]=False , A : Tuple=0.0 , A : Any=True , A : int=False , A : Dict=False , A : List[str]=False , A : Optional[Any]=True , A : Tuple=None , **A : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=A , pad_token_id=A , eos_token_id=A , decoder_start_token_id=A , forced_eos_token_id=A , is_encoder_decoder=A , prefix=A , vocab_size=A , **A , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCAmelCase = kwargs.pop('question_encoder')
_UpperCAmelCase = question_encoder_config.pop('model_type')
_UpperCAmelCase = kwargs.pop('generator')
_UpperCAmelCase = decoder_config.pop('model_type')
from ..auto.configuration_auto import AutoConfig
_UpperCAmelCase = AutoConfig.for_model(A , **A)
_UpperCAmelCase = AutoConfig.for_model(A , **A)
_UpperCAmelCase = reduce_loss
_UpperCAmelCase = label_smoothing
_UpperCAmelCase = exclude_bos_score
_UpperCAmelCase = do_marginalize
_UpperCAmelCase = title_sep
_UpperCAmelCase = doc_sep
_UpperCAmelCase = n_docs
_UpperCAmelCase = max_combined_length
_UpperCAmelCase = dataset
_UpperCAmelCase = dataset_split
_UpperCAmelCase = index_name
_UpperCAmelCase = retrieval_vector_size
_UpperCAmelCase = retrieval_batch_size
_UpperCAmelCase = passages_path
_UpperCAmelCase = index_path
_UpperCAmelCase = use_dummy_dataset
_UpperCAmelCase = output_retrieved
_UpperCAmelCase = do_deduplication
_UpperCAmelCase = use_cache
if self.forced_eos_token_id is None:
_UpperCAmelCase = getattr(self.generator , 'forced_eos_token_id' , A)
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , A : PretrainedConfig , A : PretrainedConfig , **A : Optional[Any]) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **A)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = copy.deepcopy(self.__dict__)
_UpperCAmelCase = self.question_encoder.to_dict()
_UpperCAmelCase = self.generator.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 290 | 0 |
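# How the composed RAG config is normally built through the public transformers API
# (a sketch; the checkpoint names are illustrative and require network access):
from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder, generator, n_docs=5
)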
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
SCREAMING_SNAKE_CASE__ : List[str] = int(__lowerCAmelCase )
# Initialize Result
SCREAMING_SNAKE_CASE__ : List[Any] = []
# Traverse through all denomination
for denomination in reversed(__lowerCAmelCase ):
# Find denominations
while int(__lowerCAmelCase ) >= int(__lowerCAmelCase ):
total_value -= int(__lowerCAmelCase )
answer.append(__lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
a :Any = []
a :str = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
a :Dict = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'Denomination {i}: ').strip()))
a :Optional[Any] = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
a :Tuple = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
a :List[str] = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'Following is minimal change for {value}: ')
a :Dict = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 132 |
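# Caveat worth remembering: the greedy strategy above is only optimal for canonical
# coin systems such as the INR denominations hard-coded in the driver; for arbitrary
# denominations it can return more coins than necessary.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "63") == [50, 10, 2, 1]
assert find_minimum_change([1, 3, 4], "6") == [4, 1, 1]  # optimal would be [3, 3]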
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a :Optional[Any] = logging.get_logger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , *_a , **_a ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 132 | 1 |
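# The deprecation-shim pattern above, reduced to its essentials (hypothetical names,
# self-contained): the old class keeps working but warns at construction time.
import warnings


class NewProcessor:
    pass


class OldProcessor(NewProcessor):
    """Deprecated alias kept for backwards compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)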
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : str=None , snake_case_ : Optional[int]=1 ):
snake_case__ : int = tokenizer
snake_case__ : Dict = dataset
snake_case__ : Optional[int] = len(snake_case_ ) if n_tasks is None else n_tasks
snake_case__ : List[str] = n_copies
def __iter__( self : Union[str, Any] ):
snake_case__ : Optional[int] = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
snake_case__ : Optional[int] = self.tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : List[str] ):
snake_case__ : Dict = start_length
snake_case__ : Optional[Any] = eof_strings
snake_case__ : List[str] = tokenizer
def __call__( self : List[str] , snake_case_ : Tuple , snake_case_ : Optional[int] , **snake_case_ : Optional[Any] ):
snake_case__ : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
snake_case__ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(snake_case_ )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
snake_case__ : List[Any] = re.split("""(%s)""" % """|""".join(_lowerCAmelCase ) , _lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=20 , **_lowerCAmelCase ) -> Union[str, Any]:
snake_case__ : Optional[Any] = defaultdict(_lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCAmelCase ) ):
with torch.no_grad():
snake_case__ : Optional[int] = batch["""ids"""].shape[-1]
snake_case__ : List[Any] = accelerator.unwrap_model(_lowerCAmelCase ).generate(
input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=_lowerCAmelCase , **_lowerCAmelCase )
# each task is generated batch_size times
snake_case__ : Optional[int] = batch["""task_id"""].repeat(_lowerCAmelCase )
snake_case__ : List[Any] = accelerator.pad_across_processes(
_lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
snake_case__ : Optional[Any] = accelerator.gather((generated_tokens, generated_tasks) )
snake_case__ : Optional[Any] = generated_tokens.cpu().numpy()
snake_case__ : Optional[int] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCAmelCase , _lowerCAmelCase ):
gen_token_dict[task].append(_lowerCAmelCase )
snake_case__ : Optional[Any] = [[] for _ in range(_lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
snake_case__ : int = tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
code_gens[task].append(remove_last_block(_lowerCAmelCase ) )
return code_gens
def __snake_case( ) -> Union[str, Any]:
# Setup configuration
snake_case__ : List[str] = HfArgumentParser(_lowerCAmelCase )
snake_case__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
snake_case__ : Union[str, Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
snake_case__ : Union[str, Any] = """false"""
if args.num_workers is None:
snake_case__ : int = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
snake_case__ : int = Accelerator()
set_seed(args.seed , device_specific=_lowerCAmelCase )
# Load model and tokenizer
snake_case__ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
snake_case__ : Dict = tokenizer.eos_token
snake_case__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
snake_case__ : Any = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCAmelCase , _lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
snake_case__ : Optional[int] = load_dataset("""openai_humaneval""" )
snake_case__ : List[Any] = load_metric("""code_eval""" )
snake_case__ : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
snake_case__ : Dict = args.n_samples // args.batch_size
snake_case__ : int = TokenizedDataset(_lowerCAmelCase , human_eval["""test"""] , n_copies=_lowerCAmelCase , n_tasks=_lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
snake_case__ : Optional[Any] = DataLoader(_lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
snake_case__ : List[Any] = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] )
except ValueError as exception:
print(
"""Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
""" flag to enable code evaluation.""" )
raise exception
snake_case__ : Dict = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase )
snake_case__ : str = complete_code(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , n_tasks=_lowerCAmelCase , batch_size=args.batch_size , **_lowerCAmelCase , )
if accelerator.is_main_process:
snake_case__ : str = []
for task in tqdm(range(_lowerCAmelCase ) ):
snake_case__ : Any = human_eval["""test"""][task]["""test"""]
snake_case__ : int = f"check({human_eval['test'][task]['entry_point']})"
references.append("""\n""" + test_func + """\n""" + entry_point )
# Evaluate completions with "code_eval" metric
snake_case__ : Optional[Any] = code_eval_metric.compute(
references=_lowerCAmelCase , predictions=_lowerCAmelCase , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , """w""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 361 |
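# What the stop-string post-processing in the script does, on a concrete completion
# (standalone; EOF_STRINGS mirrors the list defined at the top of the snippet):
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
generation = "    return a + b\n\nprint(add(1, 2))"
parts = re.split("(%s)" % "|".join(EOF_STRINGS), generation)
# re.split keeps the matched delimiter; dropping the last two items removes the first
# stop string and everything generated after it.
print(repr("".join(parts[:-2])))  # '    return a + b\n'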
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a = logging.getLogger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , snake_case_ : Tuple=None ):
super().__init__(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , index=snake_case_ , init_retrieval=snake_case_ , )
snake_case__ : int = None
def lowerCamelCase ( self : int , snake_case_ : int ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case__ : Optional[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case__ : int = str(distributed_port + 1 )
snake_case__ : List[str] = dist.new_group(ranks=snake_case_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase ( self : Optional[Any] ):
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase ( self : int , snake_case_ : str , snake_case_ : int , snake_case_ : int=torch.floataa ):
snake_case__ : str = torch.empty(snake_case_ , dtype=snake_case_ )
dist.scatter(snake_case_ , src=0 , scatter_list=snake_case_ , group=self.process_group )
return target_tensor
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Dict = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
snake_case__ : Dict = next((addr for addr in addrs if addr.startswith("""e""" )) , snake_case_ )
return ifname
def lowerCamelCase ( self : Tuple , snake_case_ : np.ndarray , snake_case_ : int ):
# single GPU training
if not dist.is_initialized():
snake_case__ , snake_case__ : Union[str, Any] = self._main_retrieve(snake_case_ , snake_case_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case_ )
# distributed training
snake_case__ : Optional[int] = dist.get_world_size(group=self.process_group )
# gather logic
snake_case__ : str = None
if self._is_main():
snake_case__ : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(snake_case_ )]
dist.gather(torch.tensor(snake_case_ ) , dst=0 , gather_list=snake_case_ , group=self.process_group )
# scatter logic
snake_case__ : Union[str, Any] = question_hidden_states.shape[0]
snake_case__ : List[str] = []
snake_case__ : Dict = []
if self._is_main():
assert len(snake_case_ ) == world_size
snake_case__ , snake_case__ : Union[str, Any] = self._main_retrieve(torch.cat(snake_case_ ).numpy() , snake_case_ )
snake_case__ , snake_case__ : Dict = torch.tensor(snake_case_ ), torch.tensor(snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._scattered(snake_case_ , [n_queries, n_docs] , target_type=torch.intaa )
snake_case__ : Dict = self._scattered(snake_case_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(snake_case_ )
| 43 | 0 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = None):
__SCREAMING_SNAKE_CASE = (
os.path.join(lowerCAmelCase__ , config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__SCREAMING_SNAKE_CASE = Extractor
def snake_case_ ( self , lowerCAmelCase__):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
__SCREAMING_SNAKE_CASE = os.path.abspath(lowerCAmelCase__)
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase__))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
return force_extract or (
not os.path.isfile(lowerCAmelCase__) and not (os.path.isdir(lowerCAmelCase__) and os.listdir(lowerCAmelCase__))
)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False):
__SCREAMING_SNAKE_CASE = self.extractor.infer_extractor_format(lowerCAmelCase__)
if not extractor_format:
return input_path
__SCREAMING_SNAKE_CASE = self._get_output_path(lowerCAmelCase__)
if self._do_extract(lowerCAmelCase__ , lowerCAmelCase__):
self.extractor.extract(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
return output_path
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
@classmethod
@abstractmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
...
@staticmethod
@abstractmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
...
class SCREAMING_SNAKE_CASE_ ( __a , __a ):
"""simple docstring"""
__lowercase : List[bytes] = []
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
with open(lowerCAmelCase__ , """rb""") as f:
return f.read(lowerCAmelCase__)
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ = b""):
if not magic_number:
__SCREAMING_SNAKE_CASE = max(len(lowerCAmelCase__) for cls_magic_number in cls.magic_numbers)
try:
__SCREAMING_SNAKE_CASE = cls.read_magic_number(lowerCAmelCase__ , lowerCAmelCase__)
except OSError:
return False
return any(magic_number.startswith(lowerCAmelCase__) for cls_magic_number in cls.magic_numbers)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
return tarfile.is_tarfile(lowerCAmelCase__)
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
def resolved(lowerCAmelCase__) -> str:
return os.path.realpath(os.path.abspath(lowerCAmelCase__))
def badpath(lowerCAmelCase__ , lowerCAmelCase__) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCAmelCase__ , lowerCAmelCase__)).startswith(lowerCAmelCase__)
def badlink(lowerCAmelCase__ , lowerCAmelCase__) -> bool:
# Links are interpreted relative to the directory containing the link
__SCREAMING_SNAKE_CASE = resolved(os.path.join(lowerCAmelCase__ , os.path.dirname(info.name)))
return badpath(info.linkname , base=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = resolved(lowerCAmelCase__)
for finfo in members:
if badpath(finfo.name , lowerCAmelCase__):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
elif finfo.issym() and badlink(lowerCAmelCase__ , lowerCAmelCase__):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
elif finfo.islnk() and badlink(lowerCAmelCase__ , lowerCAmelCase__):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
else:
yield finfo
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tarfile.open(lowerCAmelCase__)
tar_file.extractall(lowerCAmelCase__ , members=TarExtractor.safemembers(lowerCAmelCase__ , lowerCAmelCase__))
tar_file.close()
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : int = [B'''\x1F\x8B''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
with gzip.open(lowerCAmelCase__ , """rb""") as gzip_file:
with open(lowerCAmelCase__ , """wb""") as extracted_file:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[Any] = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ = b""):
if super().is_extractable(lowerCAmelCase__ , magic_number=lowerCAmelCase__):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCAmelCase__ , """rb""") as fp:
__SCREAMING_SNAKE_CASE = _EndRecData(lowerCAmelCase__)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__SCREAMING_SNAKE_CASE = fp.read(lowerCAmelCase__) # CD is where we expect it to be
if len(lowerCAmelCase__) == sizeCentralDir:
__SCREAMING_SNAKE_CASE = struct.unpack(lowerCAmelCase__ , lowerCAmelCase__) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
with zipfile.ZipFile(lowerCAmelCase__ , """r""") as zip_file:
zip_file.extractall(lowerCAmelCase__)
zip_file.close()
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[str] = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
with lzma.open(lowerCAmelCase__) as compressed_file:
with open(lowerCAmelCase__ , """wb""") as extracted_file:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Any = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""")
import rarfile
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rarfile.RarFile(lowerCAmelCase__)
rf.extractall(lowerCAmelCase__)
rf.close()
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Union[str, Any] = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""")
import zstandard as zstd
__SCREAMING_SNAKE_CASE = zstd.ZstdDecompressor()
with open(lowerCAmelCase__ , """rb""") as ifh, open(lowerCAmelCase__ , """wb""") as ofh:
dctx.copy_stream(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : int = [B'''\x42\x5A\x68''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
with bz2.open(lowerCAmelCase__ , """rb""") as compressed_file:
with open(lowerCAmelCase__ , """wb""") as extracted_file:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Union[str, Any] = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""")
import py7zr
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
with py7zr.SevenZipFile(lowerCAmelCase__ , """r""") as archive:
archive.extractall(lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Tuple = [B'''\x04\x22\x4D\x18''']
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""")
import lz4.frame
with lz4.frame.open(lowerCAmelCase__ , """rb""") as compressed_file:
with open(lowerCAmelCase__ , """wb""") as extracted_file:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
__lowercase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case_ ( cls):
return max(
len(lowerCAmelCase__)
for extractor in cls.extractors.values()
if issubclass(lowerCAmelCase__ , lowerCAmelCase__)
for extractor_magic_number in extractor.magic_numbers)
@staticmethod
def snake_case_ ( lowerCAmelCase__ , lowerCAmelCase__):
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase__ , magic_number_length=lowerCAmelCase__)
except OSError:
return b""
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ = False):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = cls.infer_extractor_format(lowerCAmelCase__)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case_ ( cls , lowerCAmelCase__): # <Added version="2.4.0"/>
__SCREAMING_SNAKE_CASE = cls._get_magic_number_max_length()
__SCREAMING_SNAKE_CASE = cls._read_magic_number(lowerCAmelCase__ , lowerCAmelCase__)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCAmelCase__ , magic_number=lowerCAmelCase__):
return extractor_format
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = "deprecated" , ):
os.makedirs(os.path.dirname(lowerCAmelCase__) , exist_ok=lowerCAmelCase__)
# Prevent parallel extractions
__SCREAMING_SNAKE_CASE = str(Path(lowerCAmelCase__).with_suffix(""".lock"""))
with FileLock(lowerCAmelCase__):
shutil.rmtree(lowerCAmelCase__ , ignore_errors=lowerCAmelCase__)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCAmelCase__ , lowerCAmelCase__): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = extractor if extractor != """deprecated""" else extractor_format
else:
__SCREAMING_SNAKE_CASE = cls.extractors[extractor_format]
return extractor.extract(lowerCAmelCase__ , lowerCAmelCase__)
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowerCAmelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCAmelCase__):
return extractor.extract(lowerCAmelCase__ , lowerCAmelCase__)
| 100 |
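# Standalone illustration of the magic-number dispatch the extractor registry above
# performs; the byte prefixes mirror the class constants defined in the snippet.
MAGIC_NUMBERS = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"BZh": "bz2",
}


def sniff_format(path: str) -> str | None:
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC_NUMBERS))
    for magic, fmt in MAGIC_NUMBERS.items():
        if head.startswith(magic):
            return fmt
    return None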
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Dict = '''informer'''
__lowercase : Union[str, Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "student_t" , lowerCAmelCase__ = "nll" , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__=True , lowerCAmelCase__ = "prob" , lowerCAmelCase__ = 5 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ):
# time series specific configuration
__SCREAMING_SNAKE_CASE = prediction_length
__SCREAMING_SNAKE_CASE = context_length or prediction_length
__SCREAMING_SNAKE_CASE = distribution_output
__SCREAMING_SNAKE_CASE = loss
__SCREAMING_SNAKE_CASE = input_size
__SCREAMING_SNAKE_CASE = num_time_features
__SCREAMING_SNAKE_CASE = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__SCREAMING_SNAKE_CASE = scaling
__SCREAMING_SNAKE_CASE = num_dynamic_real_features
__SCREAMING_SNAKE_CASE = num_static_real_features
__SCREAMING_SNAKE_CASE = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase__) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""")
__SCREAMING_SNAKE_CASE = cardinality
else:
__SCREAMING_SNAKE_CASE = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase__) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""")
__SCREAMING_SNAKE_CASE = embedding_dimension
else:
__SCREAMING_SNAKE_CASE = [min(5_0 , (cat + 1) // 2) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence) + self._number_of_features
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = use_cache
# Informer
__SCREAMING_SNAKE_CASE = attention_type
__SCREAMING_SNAKE_CASE = sampling_factor
__SCREAMING_SNAKE_CASE = distil
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__)
@property
def snake_case_ ( self):
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 100 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase ( unittest.TestCase , lowerCamelCase_ ):
'''simple docstring'''
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = load_tool('text-classification' )
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=lowerCAmelCase__ )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(lowerCAmelCase__ , 'positive' )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(lowerCAmelCase__ , 'positive' )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(lowerCAmelCase__ , 'positive' )
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(lowerCAmelCase__ , 'positive' )
| 38 |
"""simple docstring"""
from __future__ import annotations
def lowercase (SCREAMING_SNAKE_CASE_ : list[int] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE_ ) ) == len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
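# A complementary early-exit variant: unlike the len(set(...)) one-liner above, this
# stops at the first repeated element, which helps on long or lazily produced inputs.
def all_unique_lazy(items) -> bool:
    seen = set()
    for item in items:
        if item in seen:
            return False
        seen.add(item)
    return True


assert all_unique_lazy([1, 2, 3]) and not all_unique_lazy([1, 2, 2])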
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCAmelCase : Optional[int] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase : List[Any] = {
'''RUCAIBox/mvp''': 10_24,
}
class __lowercase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
UpperCamelCase : Union[str, Any] = MvpTokenizer
def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> str:
'''simple docstring'''
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space:
lowerCamelCase = getattr(A , pre_tok_state.pop("""type""" ) )
lowerCamelCase = add_prefix_space
lowerCamelCase = pre_tok_class(**A )
lowerCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase = "post_processor"
lowerCamelCase = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase = tuple(state["""sep"""] )
if "cls" in state:
lowerCamelCase = tuple(state["""cls"""] )
lowerCamelCase = False
if state.get("""add_prefix_space""" , A ) != add_prefix_space:
lowerCamelCase = add_prefix_space
lowerCamelCase = True
if state.get("""trim_offsets""" , A ) != trim_offsets:
lowerCamelCase = trim_offsets
lowerCamelCase = True
if changes_to_apply:
lowerCamelCase = getattr(A , state.pop("""type""" ) )
lowerCamelCase = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
lowerCamelCase = value
def __A ( self , *A , **A ) -> Tuple:
'''simple docstring'''
lowerCamelCase = kwargs.get("""is_split_into_words""" , A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*A , **A )
def __A ( self , *A , **A ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = kwargs.get("""is_split_into_words""" , A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*A , **A )
def __A ( self , A , A = None ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self._tokenizer.model.save(A , name=A )
return tuple(A )
def __A ( self , A , A=None ) -> Any:
'''simple docstring'''
lowerCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __A ( self , A , A = None ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 252 |
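# Typical use of the published class this snippet mirrors (a sketch; downloading the
# "RUCAIBox/mvp" checkpoint requires network access):
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
encoding = tokenizer("Summarize: MVP is a multi-task pre-trained model.", return_tensors="pt")
print(encoding["input_ids"].shape)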
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : Dict=True ,__lowerCamelCase : List[Any]="pt" ):
lowercase_ :Dict = {"add_prefix_space": True} if isinstance(__lowerCamelCase ,__lowerCamelCase ) and not line.startswith(" " ) else {}
lowercase_ :str = padding_side
return tokenizer(
[line] ,max_length=__lowerCamelCase ,padding="max_length" if pad_to_max_length else None ,truncation=__lowerCamelCase ,return_tensors=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,)
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Dict ,__lowerCamelCase : str=None ,):
lowercase_ :Optional[int] = input_ids.ne(__lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class a_ ( _lowerCAmelCase ):
def __init__( self : Optional[int] , lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : str="train" , lowercase : Dict=None , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : int="" , ):
"""simple docstring"""
super().__init__()
lowercase_ :List[Any] = Path(lowercase ).joinpath(type_path + ".source" )
lowercase_ :Dict = Path(lowercase ).joinpath(type_path + ".target" )
lowercase_ :Optional[int] = self.get_char_lens(self.src_file )
lowercase_ :List[str] = max_source_length
lowercase_ :str = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
lowercase_ :int = tokenizer
lowercase_ :Dict = prefix
if n_obs is not None:
lowercase_ :Union[str, Any] = self.src_lens[:n_obs]
lowercase_ :Optional[int] = src_lang
lowercase_ :str = tgt_lang
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : str , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Tuple = index + 1 # linecache starts at 1
lowercase_ :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , lowercase ).rstrip("\n" )
lowercase_ :List[str] = linecache.getline(str(self.tgt_file ) , lowercase ).rstrip("\n" )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase_ :List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase ) else self.tokenizer
)
lowercase_ :int = self.tokenizer.generator if isinstance(self.tokenizer , lowercase ) else self.tokenizer
lowercase_ :List[str] = encode_line(lowercase , lowercase , self.max_source_length , "right" )
lowercase_ :Any = encode_line(lowercase , lowercase , self.max_target_length , "right" )
lowercase_ :Dict = source_inputs["input_ids"].squeeze()
lowercase_ :Tuple = target_inputs["input_ids"].squeeze()
lowercase_ :Optional[int] = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase__ ( lowercase : Union[str, Any] ):
"""simple docstring"""
return [len(lowercase ) for x in Path(lowercase ).open().readlines()]
def lowercase__ ( self : str , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = torch.stack([x["input_ids"] for x in batch] )
lowercase_ :Dict = torch.stack([x["attention_mask"] for x in batch] )
lowercase_ :List[str] = torch.stack([x["decoder_input_ids"] for x in batch] )
lowercase_ :Any = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
lowercase_ :str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase )
else self.tokenizer.pad_token_id
)
lowercase_ :Union[str, Any] = trim_batch(lowercase , lowercase )
lowercase_ , lowercase_ :Optional[Any] = trim_batch(lowercase , lowercase , attention_mask=lowercase )
lowercase_ :Tuple = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
lowerCAmelCase : List[str] =getLogger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : List[List] ):
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def UpperCAmelCase_ ( __lowerCamelCase : str ):
lowercase_ :List[str] = get_git_info()
save_json(__lowerCamelCase ,os.path.join(__lowerCamelCase ,"git_log.json" ) )
def UpperCAmelCase_ ( __lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[Any]=4 ,**__lowerCamelCase : List[str] ):
with open(__lowerCamelCase ,"w" ) as f:
json.dump(__lowerCamelCase ,__lowerCamelCase ,indent=__lowerCamelCase ,**__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : Tuple ):
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def UpperCAmelCase_ ( ):
lowercase_ :Dict = git.Repo(search_parent_directories=__lowerCamelCase )
lowercase_ :List[str] = {
"repo_id": str(__lowerCamelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase_ ( __lowerCamelCase : Callable ,__lowerCamelCase : Iterable ):
return list(map(__lowerCamelCase ,__lowerCamelCase ) )
def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[str] ):
with open(__lowerCamelCase ,"wb" ) as f:
return pickle.dump(__lowerCamelCase ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : str ):
def remove_articles(__lowerCamelCase : Optional[int] ):
return re.sub(r"\b(a|an|the)\b" ," " ,__lowerCamelCase )
def white_space_fix(__lowerCamelCase : Dict ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase : Optional[Any] ):
lowercase_ :Any = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def UpperCAmelCase_ ( __lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[int] ):
lowercase_ :Tuple = normalize_answer(__lowerCamelCase ).split()
lowercase_ :Dict = normalize_answer(__lowerCamelCase ).split()
lowercase_ :Tuple = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
lowercase_ :Tuple = sum(common.values() )
if num_same == 0:
return 0
lowercase_ :Union[str, Any] = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ :List[Any] = 1.0 * num_same / len(__lowerCamelCase )
lowercase_ :Tuple = (2 * precision * recall) / (precision + recall)
return fa
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix: str) -> bool:
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
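# A minimal usage sketch (names are illustrative, not from this module): fields
# that belong on the model config are moved off the hparams namespace.
#
# import argparse
# hparams = argparse.Namespace(dropout=0.1, attention_dropout=None)
# hparams, config = set_extra_model_params(("dropout", "attention_dropout"), hparams, config)
# # config.dropout (or config.dropout_rate for T5) is now 0.1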
| 223 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=['politics', 'health'])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # No kwarg
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(
            outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier(
            'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(
            outputs,
            [
                {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )
        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')
        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template', )
        with self.assertRaises(AttributeError):
            classifier(
                'Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None, )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True, )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs),
            {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True, )
self.assertEqual(
            nested_simplify(outputs), {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , ) | 367 |
"""Generate every permutation of a sequence via a backtracking state-space tree."""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Print the current permutation once every element has been placed."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
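# Sanity-check sketch (not part of the original module): the tree above visits
# exactly the n! orderings that itertools.permutations would produce.
#
# >>> import itertools
# >>> sum(1 for _ in itertools.permutations([3, 1, 2, 4]))
# 24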
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2) | 190 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
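# A minimal usage sketch (field names as defined above): defaults reproduce the
# base architecture; any field can be overridden by keyword.
#
# config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
# assert config.num_hidden_layers == 12 and config.image_size == 384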
| 249 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 249 | 1 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory that builds a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command and its arguments on the transformers-cli root parser."""
        train_parser = parser.add_parser(
            'convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.', )
        train_parser.add_argument('--model_type', type=str, required=True, help='Model\'s type.')
        train_parser.add_argument(
            '--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument(
            '--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument(
            '--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.', )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ):
        self._logger = logging.get_logger('transformers-cli/converting')
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE__ : str =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : str =''''''
else:
SCREAMING_SNAKE_CASE__ : Any =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] =''''''
convert_transfo_xl_checkpoint_to_pytorch(
__lowercase , self._config , self._pytorch_dump_output , __lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' ) | 222 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar('KT')
VT = TypeVar('VT')


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. the height of this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Flip a p-biased coin until it fails; the number of successes is the level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values['Key1'] == 3
    assert all_values['Key2'] == 12
    assert all_values['Key3'] == 41
    assert all_values['Key4'] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)
    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)
    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values['Key1'] == 12
    assert all_values['Key7'] == 7
    assert all_values['Key5'] == 5
    assert all_values['Key10'] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('Some key') is None


def test_search():
    skip_list = SkipList()
    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20
    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)
    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('Some key')
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)
    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15
    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)
    skip_list.delete('X')

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')
    skip_list.delete(4)
    print(skip_list)
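# Quick usage sketch (hypothetical keys and values): the skip list behaves like
# a sorted map with expected O(log n) insert, find and delete.
#
# >>> sl = SkipList()
# >>> sl.insert("k", 1)
# >>> sl.find("k")
# 1
# >>> sl.delete("k")
# >>> sl.find("k") is None
# True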
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 222 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that, by default, renders the bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.')
    disable = False
    if main_process_only:
        # Disable the bar on every rank except local process 0.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
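# A minimal usage sketch (assuming a multi-process accelerate launch): note that
# with this signature the iterable is passed positionally after the
# `main_process_only` flag.
#
# for step in tqdm(True, range(1_000), desc="steps"):
#     ...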
| 182 | def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be abbreviated to string `b` by upper-casing
    some of its lowercase letters and deleting the remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
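# Worked examples for the check above (the canonical test cases for this problem):
#
# >>> abbr("daBcd", "ABC")
# True
# >>> abbr("dBcd", "ABC")
# False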
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182 | 1 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Recursively scan the list from both ends to find the index of `key`;
    return -1 if the key is absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
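# Doctest-style sketch of the search above (inputs are illustrative):
#
# >>> search([1, 0, 5, 7, 9], 7)
# 3
# >>> search([1, 0, 5], 6)
# -1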
if __name__ == "__main__":
import doctest
doctest.testmod()
| 249 |
'''simple docstring'''
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: the outer bits select the row, the inner bits select the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (uses the module-level p4_table)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 249 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float16, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 340 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) | 190 | 0 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
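# Example invocation via python-fire (paths are placeholders):
#
#   python minify_dataset.py data/full data/mini 64
#   # or with flags: python minify_dataset.py --src_dir=data/full --dest_dir=data/mini --n=64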
| 370 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
| 261 | 0 |
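# Why dense weights are transposed in the converter above: torch.nn.Linear
# stores its weight as (out_features, in_features), while a TF dense kernel is
# (in_features, out_features). A small numpy check of the equivalence:
import numpy as np

rng = np.random.default_rng(0)
torch_weight = rng.normal(size=(4, 3))  # torch layout: (out, in)
x = rng.normal(size=(2, 3))
torch_out = x @ torch_weight.T          # what torch's linear computes (bias omitted)
tf_kernel = torch_weight.T              # the converter's transpose step
tf_out = x @ tf_kernel                  # what a TF dense layer computes
print(np.allclose(torch_out, tf_out))   # True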
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : int = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """roc_bert"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 57 |
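# Assuming a transformers release that ships RoCBert, the config above
# round-trips like any PretrainedConfig (the overrides below are arbitrary):
from transformers import RoCBertConfig

config = RoCBertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.shape_vocab_size)  # the default 24858 is kept
restored = RoCBertConfig.from_dict(config.to_dict())
assert restored.hidden_size == 256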
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''image_processing_efficientnet'''] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_efficientnet'''] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 0 |
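# A minimal, standalone sketch of the lazy-import trick the module above uses:
# defer the real import until an attribute is first requested, via PEP 562's
# module-level __getattr__. The use of the json module here is illustrative.
import importlib
import sys

_demo_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _demo_import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

print(sys.modules[__name__].dumps({"a": 1}))  # json is only imported here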
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Single-layer classification head mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 160 |
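# Quick shape check for the head defined above (the sizes are arbitrary):
import torch

head = ClassificationHead(class_size=5, embed_size=16)
hidden = torch.randn(3, 16)
print(head(hidden).shape)  # torch.Size([3, 5])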
'''simple docstring'''
import os
import sys
import unittest
lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase : List[Any] = os.path.join(git_repo_path, "src", "diffusers")
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = find_backend(' if not is_torch_available():' )
self.assertEqual(lowerCAmelCase_ , 'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(lowerCAmelCase_ , 'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(lowerCAmelCase_ , 'torch_and_transformers_and_onnx' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowerCAmelCase_ )
self.assertIn('torch_and_transformers' , lowerCAmelCase_ )
self.assertIn('flax_and_transformers' , lowerCAmelCase_ )
self.assertIn('torch_and_transformers_and_onnx' , lowerCAmelCase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(lowerCAmelCase_ , '\nCONSTANT = None\n' )
_snake_case = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
lowerCAmelCase_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
_snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_snake_case = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowerCAmelCase_ )
| 160 | 1 |
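# A simplified sketch of what a find_backend() helper like the one exercised
# above might do; this is an illustration, not the exact diffusers implementation:
import re

_re_backend = re.compile(r"is_([a-z_]*)_available\(\)")

def find_backend_sketch(line: str):
    backends = _re_backend.findall(line)
    if not backends:
        return None
    return "_and_".join(backends)

print(find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):"))
# -> torch_and_transformers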
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = 42
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 88 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__()
A__ = num_attention_heads
A__ = attention_head_dim
A__ = num_attention_heads * attention_head_dim
A__ = in_channels
A__ = torch.nn.GroupNorm(num_groups=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , eps=1e-6 , affine=UpperCAmelCase__)
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
# 3. Define transformers blocks
A__ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , cross_attention_dim=UpperCAmelCase__ , activation_fn=UpperCAmelCase__ , attention_bias=UpperCAmelCase__ , double_self_attention=UpperCAmelCase__ , norm_elementwise_affine=UpperCAmelCase__ , )
for d in range(UpperCAmelCase__)
])
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : bool = True , ) ->List[str]:
'''simple docstring'''
A__ , A__ , A__ , A__ = hidden_states.shape
A__ = batch_frames // num_frames
A__ = hidden_states
A__ = hidden_states[None, :].reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = hidden_states.permute(0 , 2 , 1 , 3 , 4)
A__ = self.norm(UpperCAmelCase__)
A__ = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , UpperCAmelCase__ , UpperCAmelCase__)
A__ = self.proj_in(UpperCAmelCase__)
# 2. Blocks
for block in self.transformer_blocks:
A__ = block(
UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , timestep=UpperCAmelCase__ , cross_attention_kwargs=UpperCAmelCase__ , class_labels=UpperCAmelCase__ , )
# 3. Output
A__ = self.proj_out(UpperCAmelCase__)
A__ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
.permute(0 , 3 , 4 , 1 , 2)
.contiguous()
)
A__ = hidden_states.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase__)
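# Sketch of the fold/permute dance in the forward pass above: pull the frame
# axis out of the batch so attention runs across frames at each spatial position.
import torch

batch, frames, channels, h, w = 2, 4, 8, 3, 3
x = torch.randn(batch * frames, channels, h, w)        # input: (B*F, C, H, W)
x = x[None, :].reshape(batch, frames, channels, h, w)  # (B, F, C, H, W)
x = x.permute(0, 2, 1, 3, 4)                           # (B, C, F, H, W)
x = x.permute(0, 3, 4, 2, 1).reshape(batch * h * w, frames, channels)
print(x.shape)  # torch.Size([18, 4, 8]): one frame sequence per pixel location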
| 14 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
a__: int = None
if token is not None:
a__: Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Optional[Any] = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a__: str = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: str = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
a__: int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Dict = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Dict:
a__: Dict = None
if token is not None:
a__: List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Dict = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
a__: List[Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
a__: Dict = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
a__: Optional[int] = requests.get(url + F'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
a__: List[Any] = None
if token is not None:
a__: Optional[int] = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'}
a__: Union[str, Any] = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = result.headers['Location']
a__: Optional[int] = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
a__: int = os.path.join(_SCREAMING_SNAKE_CASE , F'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fp:
fp.write(response.content )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
a__: List[Any] = []
a__: Optional[Any] = []
a__: List[Any] = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
a__: Optional[int] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__: Union[str, Any] = line[: line.index(': ' )]
a__: Union[str, Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
a__: Optional[int] = line[len('FAILED ' ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
a__: Union[str, Any] = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
F'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
F'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
a__: Tuple = None
if job_name and job_links:
a__: Dict = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
a__: int = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->str:
a__: int = []
a__: Optional[int] = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Any:
a__: str = Counter()
counter.update([x[1] for x in logs] )
a__: int = counter.most_common()
a__: Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__: List[str] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
a__: Optional[Any] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: List[str] = test.split('::' )[0]
if test.startswith('tests/models/' ):
a__: Dict = test.split('/' )[2]
else:
a__: Any = None
return test
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[str]:
a__: int = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__: List[Any] = [x for x in logs if x[2] is not None]
a__: Optional[Any] = {x[2] for x in logs}
a__: Dict = {}
for test in tests:
a__: Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__: Union[str, Any] = counter.most_common()
a__: List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__: List[Any] = sum(error_counts.values() )
if n_errors > 0:
a__: Any = {'count': n_errors, 'errors': error_counts}
a__: Optional[int] = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
a__: Any = '| no. | error | status |'
a__: Any = '|-:|:-|:-|'
a__: str = [header, sep]
for error in reduced_by_error:
a__: int = reduced_by_error[error]['count']
a__: Tuple = F'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
a__: List[str] = '| model | no. of errors | major error | count |'
a__: str = '|-:|-:|-:|-:|'
a__: int = [header, sep]
for model in reduced_by_model:
a__: Tuple = reduced_by_model[model]['count']
a__ , a__: Dict = list(reduced_by_model[model]['errors'].items() )[0]
a__: Dict = F'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowercase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase__ = get_job_links(args.workflow_run_id, token=args.token)
lowercase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase__ = k.find(' / ')
lowercase__ = k[index + len(' / ') :]
lowercase__ = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase__ = reduce_by_error(errors)
lowercase__ = reduce_by_model(errors)
lowercase__ = make_github_table(reduced_by_error)
lowercase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 290 | 0 |
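# Toy run of the aggregation idea above: count identical error strings with a
# Counter and report the most common first (these log entries are made up):
from collections import Counter

logs = [
    ("line 1", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_fwd"),
    ("line 2", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_fwd"),
    ("line 3", "ImportError", "tests/models/bert/test_modeling_bert.py::test_cfg"),
]
counter = Counter(x[1] for x in logs)
print(counter.most_common())  # [('AssertionError', 2), ('ImportError', 1)]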
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase__ : Dict =NewType('''DataClass''', Any)
UpperCAmelCase__ : int =NewType('''DataClassType''', Any)
def _lowercase ( _UpperCAmelCase ) -> List[Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def _lowercase ( _UpperCAmelCase ) -> Callable[[str], Any]:
lowerCamelCase ={str(_UpperCAmelCase ): choice for choice in choices}
return lambda _UpperCAmelCase : str_to_choice.get(_UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( *,
_UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCamelCase ={}
if aliases is not None:
lowerCamelCase =aliases
if help is not None:
lowerCamelCase =help
return dataclasses.field(metadata=_UpperCAmelCase , default=_UpperCAmelCase , default_factory=_UpperCAmelCase , **_UpperCAmelCase )
class __A ( a ):
__A = 42
def __init__( self , UpperCAmelCase_ , **UpperCAmelCase_ ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCamelCase =ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_ )
if dataclasses.is_dataclass(UpperCAmelCase_ ):
lowerCamelCase =[dataclass_types]
lowerCamelCase =list(UpperCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_ )
@staticmethod
def _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =f"""--{field.name}"""
lowerCamelCase =field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
lowerCamelCase =kwargs.pop("""aliases""" , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =[aliases]
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , """UnionType""" ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
f""" Problem encountered in field '{field.name}'.""" )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase =(
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase ={}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
lowerCamelCase =field.type.__args__
else:
lowerCamelCase =[x.value for x in field.type]
lowerCamelCase =make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
lowerCamelCase =field.default
else:
lowerCamelCase =True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase =copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase =string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCamelCase =False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase =default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase ="""?"""
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase =True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =field.type.__args__[0]
lowerCamelCase ="""+"""
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase =field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase =True
else:
lowerCamelCase =field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase =field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase =field.default_factory()
else:
lowerCamelCase =True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase =False
parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
if hasattr(UpperCAmelCase_ , """_argument_group_name""" ):
lowerCamelCase =self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase =self
try:
lowerCamelCase =get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
lowerCamelCase =""".""".join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
lowerCamelCase =type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase =[]
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase =ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase , lowerCamelCase =args_file_parser.parse_known_args(args=UpperCAmelCase_ )
lowerCamelCase =vars(UpperCAmelCase_ ).get(args_file_flag.lstrip("""-""" ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
lowerCamelCase =[]
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase =file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase , lowerCamelCase =self.parse_known_args(args=UpperCAmelCase_ )
lowerCamelCase =[]
for dtype in self.dataclass_types:
lowerCamelCase ={f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
lowerCamelCase ={k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
lowerCamelCase =set(args.keys() )
lowerCamelCase =[]
for dtype in self.dataclass_types:
lowerCamelCase ={f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
lowerCamelCase ={k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase =dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}""" )
return tuple(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
with open(Path(UpperCAmelCase_ ) , encoding="""utf-8""" ) as open_json_file:
lowerCamelCase =json.loads(open_json_file.read() )
lowerCamelCase =self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
lowerCamelCase =self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
| 262 |
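# Typical use of an HfArgumentParser-style dataclass parser (this assumes the
# installed transformers package; the TrainArgs fields are invented here):
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "peak learning rate"})
    fp16: bool = field(default=False, metadata={"help": "use mixed precision"})

(train_args,) = HfArgumentParser(TrainArgs).parse_args_into_dataclasses(
    args=["--learning_rate", "1e-4", "--fp16"]
)
print(train_args.learning_rate, train_args.fp16)  # 0.0001 True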
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase__ : Dict =NewType('''DataClass''', Any)
UpperCAmelCase__ : int =NewType('''DataClassType''', Any)
def _lowercase ( _UpperCAmelCase ) -> List[Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def _lowercase ( _UpperCAmelCase ) -> Callable[[str], Any]:
lowerCamelCase ={str(_UpperCAmelCase ): choice for choice in choices}
return lambda _UpperCAmelCase : str_to_choice.get(_UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( *,
_UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = dataclasses.MISSING , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCamelCase ={}
if aliases is not None:
lowerCamelCase =aliases
if help is not None:
lowerCamelCase =help
return dataclasses.field(metadata=_UpperCAmelCase , default=_UpperCAmelCase , default_factory=_UpperCAmelCase , **_UpperCAmelCase )
class __A ( a ):
__A = 42
def __init__( self , UpperCAmelCase_ , **UpperCAmelCase_ ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowerCamelCase =ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_ )
if dataclasses.is_dataclass(UpperCAmelCase_ ):
lowerCamelCase =[dataclass_types]
lowerCamelCase =list(UpperCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_ )
@staticmethod
def _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =f"""--{field.name}"""
lowerCamelCase =field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
lowerCamelCase =kwargs.pop("""aliases""" , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =[aliases]
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , """UnionType""" ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
f""" Problem encountered in field '{field.name}'.""" )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase =(
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase =getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase ={}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
lowerCamelCase =field.type.__args__
else:
lowerCamelCase =[x.value for x in field.type]
lowerCamelCase =make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
lowerCamelCase =field.default
else:
lowerCamelCase =True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase =copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase =string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCamelCase =False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase =default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase ="""?"""
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase =True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase =field.type.__args__[0]
lowerCamelCase ="""+"""
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase =field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase =True
else:
lowerCamelCase =field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase =field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase =field.default_factory()
else:
lowerCamelCase =True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase =False
parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ ):
if hasattr(UpperCAmelCase_ , """_argument_group_name""" ):
lowerCamelCase =self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase =self
try:
lowerCamelCase =get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
lowerCamelCase =""".""".join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
lowerCamelCase =type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase =[]
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase =ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase , lowerCamelCase =args_file_parser.parse_known_args(args=UpperCAmelCase_ )
lowerCamelCase =vars(UpperCAmelCase_ ).get(args_file_flag.lstrip("""-""" ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
lowerCamelCase =[]
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase =file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase , lowerCamelCase =self.parse_known_args(args=UpperCAmelCase_ )
lowerCamelCase =[]
for dtype in self.dataclass_types:
lowerCamelCase ={f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
lowerCamelCase ={k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
lowerCamelCase =set(args.keys() )
lowerCamelCase =[]
for dtype in self.dataclass_types:
lowerCamelCase ={f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
lowerCamelCase ={k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase =dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}""" )
return tuple(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
with open(Path(UpperCAmelCase_ ) , encoding="""utf-8""" ) as open_json_file:
lowerCamelCase =json.loads(open_json_file.read() )
lowerCamelCase =self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = False ):
lowerCamelCase =self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
| 262 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , albert_config_file , pytorch_dump_path):
    '''simple docstring'''
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict() , pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 36 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1):
    '''simple docstring'''
    if (
        isinstance(input_1 , str)
        or isinstance(input_2 , str)
        or isinstance(carry_in , str)
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 43 | 0 |
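# With qiskit and qiskit-aer installed (and a qiskit version that still
# provides qiskit.execute), the adder above can be exercised directly; for
# 1 + 1 + 1 every shot should land on carry=1, sum=1:
counts = quantum_full_adder(1, 1, 1)
print(counts)  # expected to look like {'11': 1000}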
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 2_5_5 , _a = True , _a = IMAGENET_DEFAULT_MEAN , _a = IMAGENET_DEFAULT_STD , **_a , ) -> None:
super().__init__(**_a )
_a : int = size if size is not None else {'''shortest_edge''': 2_2_4}
_a : List[str] = get_size_dict(_a , default_to_square=_a )
_a : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Optional[int] = get_size_dict(_a , param_name='''crop_size''' )
_a : Optional[int] = do_resize
_a : str = size
_a : List[str] = resample
_a : str = do_center_crop
_a : Optional[Any] = crop_size
_a : Union[str, Any] = do_rescale
_a : str = rescale_factor
_a : List[Any] = do_normalize
_a : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowercase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_a : Optional[int] = get_size_dict(_a , default_to_square=_a )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_a : Optional[int] = int((2_5_6 / 2_2_4) * size['''shortest_edge'''] )
_a : Any = get_resize_output_image_size(_a , size=_a , default_to_square=_a )
_a : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_a , size=(size_dict['''height'''], size_dict['''width''']) , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_a : int = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> BatchFeature:
_a : int = do_resize if do_resize is not None else self.do_resize
_a : Optional[Any] = resample if resample is not None else self.resample
_a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_a : Any = do_rescale if do_rescale is not None else self.do_rescale
_a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Dict = do_normalize if do_normalize is not None else self.do_normalize
_a : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_a : Union[str, Any] = image_std if image_std is not None else self.image_std
_a : Union[str, Any] = size if size is not None else self.size
_a : str = get_size_dict(_a , default_to_square=_a )
_a : Optional[int] = crop_size if crop_size is not None else self.crop_size
_a : Union[str, Any] = get_size_dict(_a , param_name='''crop_size''' )
_a : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(_a ) for image in images]
if do_resize:
_a : int = [self.resize(_a , _a , _a ) for image in images]
if do_center_crop:
_a : Any = [self.center_crop(_a , _a ) for image in images]
if do_rescale:
_a : Any = [self.rescale(_a , _a ) for image in images]
if do_normalize:
_a : Optional[int] = [self.normalize(_a , _a , _a ) for image in images]
_a : Optional[int] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : Dict = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
| 355 |
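# The __call__ above applies resize -> center_crop -> rescale -> normalize.
# A toy numpy version of the last two steps, using the ImageNet default
# mean/std the class references (the output values are just illustrative):
import numpy as np

image = np.full((2, 2, 3), 128, dtype=np.uint8)  # a flat gray image
rescaled = image * (1 / 255)
mean = np.array([0.485, 0.456, 0.406])  # IMAGENET_DEFAULT_MEAN
std = np.array([0.229, 0.224, 0.225])   # IMAGENET_DEFAULT_STD
normalized = (rescaled - mean) / std
print(normalized[0, 0])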
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
    def get_target_ids( self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
| 15 | 0 |
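# --- Editor's example (not part of the original file) ---
# Hedged usage sketch for a fill-mask pipeline like the one above, via the
# public `pipeline` factory; the checkpoint name is an illustrative
# assumption, and any masked-LM checkpoint works.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")
for pred in fill_mask("Paris is the capital of [MASK].", top_k=3):
    # each prediction carries score, token, token_str and the filled sequence
    print(round(pred["score"], 3), pred["token_str"], "->", pred["sequence"])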
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    power_of_two = 2**power
    string_num = str(power_of_two )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 38 |
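# --- Editor's example (not part of the original file) ---
# Quick sanity checks for `solution` above; expected values computed by hand
# (2**15 = 32768 -> 3+2+7+6+8 = 26), and the default case is the known
# Project Euler 16 answer.
assert solution(15) == 26
assert solution(0) == 1      # 2**0 = 1
assert solution() == 1366    # Project Euler problem 16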
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Any = """trajectory_transformer"""
snake_case__ : Optional[Any] = ["""past_key_values"""]
snake_case__ : Tuple = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Union[str, Any] , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : int=249 , __lowerCamelCase : str=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : Optional[Any]=25 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=128 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : int=0.0006 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : str=0.02 , __lowerCamelCase : Any=1E-12 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=1 , __lowerCamelCase : int=50_256 , __lowerCamelCase : Union[str, Any]=50_256 , **__lowerCamelCase : Dict , ):
UpperCamelCase :Dict = vocab_size
UpperCamelCase :int = action_weight
UpperCamelCase :Tuple = reward_weight
UpperCamelCase :str = value_weight
UpperCamelCase :Tuple = max_position_embeddings
UpperCamelCase :Tuple = block_size
UpperCamelCase :Optional[int] = action_dim
UpperCamelCase :int = observation_dim
UpperCamelCase :List[str] = transition_dim
UpperCamelCase :List[Any] = learning_rate
UpperCamelCase :Optional[Any] = n_layer
UpperCamelCase :Any = n_head
UpperCamelCase :List[str] = n_embd
UpperCamelCase :Any = embd_pdrop
UpperCamelCase :str = attn_pdrop
UpperCamelCase :Union[str, Any] = resid_pdrop
UpperCamelCase :Optional[Any] = initializer_range
UpperCamelCase :List[Any] = layer_norm_eps
UpperCamelCase :Optional[int] = kaiming_initializer_range
UpperCamelCase :Tuple = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 38 | 1 |
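# --- Editor's example (not part of the original file) ---
# Sketch of using the config above; this assumes it is exposed under its
# real name, TrajectoryTransformerConfig (the class name is obscured here),
# and the small dimensions are illustrative.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
print(config.model_type)    # "trajectory_transformer"
print(config.hidden_size)   # resolves to n_embd=64 via attribute_map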
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a: Union[str, Any] = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Optional[Any] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__a: str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 214 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ):
lowercase__ : Dict = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowercase__ : Dict = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowercase__ : int = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowercase__ : Any = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables , *, num_layers , is_encoder_only ):
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase__ : List[str] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , UpperCAmelCase )
lowercase__ : Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
lowercase__ : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
lowercase__ : List[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''pre_attention_layer_norm''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''attention''' )
lowercase__ : Dict = layer_norm
lowercase__ : Any = k.T
lowercase__ : Optional[int] = o.T
lowercase__ : Optional[int] = q.T
lowercase__ : Any = v.T
# Block i, layer 1 (MLP).
lowercase__ : List[str] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , '''pre_mlp_layer_norm''' )
lowercase__ , lowercase__ : List[Any] = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , '''encoder''' , UpperCAmelCase )
lowercase__ : int = layer_norm
if split_mlp_wi:
lowercase__ : Union[str, Any] = wi[0].T
lowercase__ : List[str] = wi[1].T
else:
lowercase__ : int = wi.T
lowercase__ : List[str] = wo.T
lowercase__ : Dict = old[
'''encoder/relpos_bias/rel_embedding'''
].T
lowercase__ : str = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
lowercase__ : Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_self_attention_layer_norm''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''self_attention''' )
lowercase__ : List[str] = layer_norm
lowercase__ : List[Any] = k.T
lowercase__ : List[Any] = o.T
lowercase__ : List[str] = q.T
lowercase__ : Optional[Any] = v.T
# Block i, layer 1 (Cross Attention).
lowercase__ : List[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''encoder_decoder_attention''' )
lowercase__ : Any = layer_norm
lowercase__ : Optional[int] = k.T
lowercase__ : Optional[Any] = o.T
lowercase__ : Tuple = q.T
lowercase__ : List[Any] = v.T
# Block i, layer 2 (MLP).
lowercase__ : List[str] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , '''pre_mlp_layer_norm''' )
lowercase__ , lowercase__ : List[Any] = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , '''decoder''' , UpperCAmelCase )
lowercase__ : Optional[int] = layer_norm
if split_mlp_wi:
lowercase__ : List[str] = wi[0].T
lowercase__ : str = wi[1].T
else:
lowercase__ : str = wi.T
lowercase__ : Optional[Any] = wo.T
lowercase__ : Any = old['''decoder/decoder_norm/scale''']
lowercase__ : Optional[Any] = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase__ : Dict = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict(converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase__ : Optional[int] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase__ : List[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
lowercase__ : Union[str, Any] = state_dict['''shared.weight''']
return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only ):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
    config = TaConfig.from_json_file(config_file )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
# Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
print('''Done''' )
if __name__ == "__main__":
__a: Optional[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
__a: Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 214 | 1 |
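# --- Editor's example (not part of the original file) ---
# Programmatic equivalent of the CLI entry point above; the paths are
# hypothetical placeholders, and the function name follows the definition
# in this file.
convert_tax_checkpoint_to_pytorch(
    "/path/to/t5x/checkpoint_dir",   # hypothetical T5X checkpoint
    "/path/to/t5_config.json",       # hypothetical config file
    "/path/to/pytorch_dump",         # output directory
    is_encoder_only=False,
)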
'''simple docstring'''
import operator
def strand_sort(arr , reverse = False , solution = None ) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 35 |
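# --- Editor's note (not part of the original file) ---
# The merge step in strand_sort relies on Python's for/else: the else
# branch runs only when the loop completes without hitting `break`.
for x in [1, 2, 3]:
    if x == 99:
        break
else:
    print("no break happened")  # prints, since 99 was never found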
'''simple docstring'''
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size , sigma ):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter(image , k_size , sigma ):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
    waitKey()
| 190 | 0 |
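# --- Editor's note (not part of the original file) ---
# For comparison, the same smoothing is normally a single library call; this
# continuation sketch assumes OpenCV is installed and reuses `gray` from the
# __main__ block above, mirroring the 3x3 / sigma=1 case.
import cv2

blurred_3x3 = cv2.GaussianBlur(gray, (3, 3), sigmaX=1)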
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCAmelCase__ : str = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Dict=6.0 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : str="fp4" , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
_A: Dict = load_in_abit
_A: List[Any] = load_in_abit
_A: int = llm_inta_threshold
_A: Tuple = llm_inta_skip_modules
_A: Optional[Any] = llm_inta_enable_fpaa_cpu_offload
_A: Any = llm_inta_has_fpaa_weight
_A: str = bnb_abit_quant_type
_A: Tuple = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_A: int = torch.floataa
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , torch.dtype ):
_A: Dict = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def __magic_name__ ( self : Dict ):
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , lowerCAmelCase_ ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowerCAmelCase_ ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowerCAmelCase_ ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowerCAmelCase_ ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , lowerCAmelCase_ ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , lowerCAmelCase_ ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __magic_name__ ( cls : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Any = cls(**lowerCAmelCase_ )
_A: List[Any] = []
for key, value in kwargs.items():
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
to_remove.append(lowerCAmelCase_ )
for key in to_remove:
kwargs.pop(lowerCAmelCase_ , lowerCAmelCase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Union[str, os.PathLike] ):
"""simple docstring"""
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
_A: Optional[int] = self.to_dict()
_A: List[str] = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + '''\n'''
writer.write(lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Dict = copy.deepcopy(self.__dict__ )
_A: Any = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self : str ):
"""simple docstring"""
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : bool = True ):
"""simple docstring"""
if use_diff is True:
_A: Any = self.to_diff_dict()
else:
_A: List[str] = self.to_dict()
return json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + "\n"
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.to_dict()
# get the default config dict
_A: Dict = BitsAndBytesConfig().to_dict()
_A: Tuple = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_A: str = value
return serializable_config_dict
| 356 |
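# --- Editor's example (not part of the original file) ---
# Typical use of a quantization config like the one above with
# from_pretrained. The checkpoint name is illustrative, and this assumes
# bitsandbytes>=0.39.0 plus a CUDA device are available.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)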
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , "" ) or ""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True ):
    parser = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=a , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=a , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=a , required=a , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=a , required=a , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=a , required=a , default=a , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=a , required=a , default=a , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=a , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=a , default=8 , required=a , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=a , default=-1 , required=a , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=a , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
if args.reference_path is None:
return {}
# Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores: dict = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program , timeout , task_id , completion_id ):
    """Evaluate check_program in a sandboxed subprocess and report pass/fail."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute(check_program , result , timeout ):
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(f'''failed: {e}''' )
# Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds ):
    """Raise TimeoutException if the wrapped block runs longer than `seconds`."""
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    """Redirect stdout, stderr and stdin into a write-only buffer."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
yield
@contextlib.contextmanager
def create_tempdir():
    """Create a temporary directory and chdir into it for the duration."""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException(Exception ):
    pass
class WriteOnlyStringIO(io.StringIO ):
    """StringIO that throws an exception when it is read from."""
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream ): # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root ):
    """Temporarily change the working directory to `root`."""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
try:
yield
except BaseException as exc:
raise exc
finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None ):
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
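    # NOTE (editor): the long run of `None` assignments below disables
    # destructive attributes on os, shutil, subprocess and sys; the target
    # names were lost in this dump, but the pattern mirrors OpenAI
    # human-eval's reliability_guard.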
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
import shutil
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
import subprocess
UpperCamelCase = None # type: ignore
UpperCamelCase = None
import sys
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
| 222 |
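# --- Editor's example (not part of the original file) ---
# Hedged call to the sandboxed checker defined above; the program string
# and ids are made-up samples, and timeout is in seconds.
sample = "assert 1 + 1 == 2"
print(check_correctness(sample, timeout=3.0, task_id="demo/0", completion_id=0))
# expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}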
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
_UpperCAmelCase : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
| 222 | 1 |
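# --- Editor's example (not part of the original file) ---
# Sketch of composing two of the warpers above on dummy scores. The class
# names assume the standard Flax naming in `transformers` (this file
# obscures them), so treat the imports as an assumption.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)])
input_ids = jnp.ones((1, 4), dtype=jnp.int32)
scores = jnp.zeros((1, 100))
warped = processors(input_ids, scores, cur_len=4)  # at most top_k entries stay finite per row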
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'pegasus'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Tuple ,__lowerCamelCase : Optional[int]=5_02_65 ,__lowerCamelCase : str=10_24 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Any=40_96 ,__lowerCamelCase : int=16 ,__lowerCamelCase : List[Any]=12 ,__lowerCamelCase : Union[str, Any]=40_96 ,__lowerCamelCase : str=16 ,__lowerCamelCase : int=0.0 ,__lowerCamelCase : Any=0.0 ,__lowerCamelCase : List[str]=True ,__lowerCamelCase : Union[str, Any]=True ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : List[str]=10_24 ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : Optional[Any]=0.0 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : Tuple=0 ,__lowerCamelCase : Optional[int]=False ,__lowerCamelCase : Union[str, Any]=0 ,__lowerCamelCase : Optional[Any]=1 ,__lowerCamelCase : List[str]=1 ,**__lowerCamelCase : Tuple ,):
'''simple docstring'''
a = vocab_size
a = max_position_embeddings
a = d_model
a = encoder_ffn_dim
a = encoder_layers
a = encoder_attention_heads
a = decoder_ffn_dim
a = decoder_layers
a = decoder_attention_heads
a = dropout
a = attention_dropout
a = activation_dropout
a = activation_function
a = init_std
a = encoder_layerdrop
a = decoder_layerdrop
a = use_cache
a = encoder_layers
a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,forced_eos_token_id=_a ,**_a ,)
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
return self.d_model
| 369 |
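# --- Editor's example (not part of the original file) ---
# Building a small randomly-initialised Pegasus model from this config;
# the shrunken dimensions are illustrative only.
from transformers import PegasusConfig, PegasusForConditionalGeneration

config = PegasusConfig(d_model=64, encoder_layers=2, decoder_layers=2,
                       encoder_attention_heads=2, decoder_attention_heads=2,
                       encoder_ffn_dim=128, decoder_ffn_dim=128)
model = PegasusForConditionalGeneration(config)
print(f"{model.num_parameters():,} parameters")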
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class lowerCamelCase_ ( a_ ):
def __init__( self : int ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(*__lowerCamelCase ,**__lowerCamelCase )
requires_backends(self ,'''vision''' )
self.check_model_type(__lowerCamelCase )
def __call__( self : int ,__lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**__lowerCamelCase : str ):
'''simple docstring'''
return super().__call__(__lowerCamelCase ,**__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Any ,**__lowerCamelCase : Dict ):
'''simple docstring'''
return {}, {}, {}
def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a = load_image(__lowerCamelCase )
a = image.size
a = self.image_processor(images=__lowerCamelCase ,return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a = self.model(**__lowerCamelCase )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a = model_outputs.predicted_depth
a = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=__lowerCamelCase )
a = prediction.squeeze().cpu().numpy()
a = (output * 2_55 / np.max(__lowerCamelCase )).astype('''uint8''' )
a = Image.fromarray(__lowerCamelCase )
a = {}
a = predicted_depth
a = depth
return output_dict
| 330 | 0 |
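# --- Editor's example (not part of the original file) ---
# Hedged usage of a depth-estimation pipeline like the one above; the
# checkpoint name and image URL are illustrative.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")          # PIL.Image, as built in postprocess above
print(out["predicted_depth"].shape)     # torch.Tensor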
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
UpperCamelCase =XGLMConfig
UpperCamelCase ={}
UpperCamelCase ="gelu"
def __init__( self , UpperCamelCase_ , UpperCamelCase_=14 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , ) -> Tuple:
__lowercase : List[str] = parent
__lowercase : Any = batch_size
__lowercase : Dict = seq_length
__lowercase : List[str] = is_training
__lowercase : List[Any] = use_input_mask
__lowercase : List[Any] = use_labels
__lowercase : str = vocab_size
__lowercase : Tuple = d_model
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : str = num_attention_heads
__lowercase : Union[str, Any] = ffn_dim
__lowercase : Optional[int] = activation_function
__lowercase : Optional[int] = activation_dropout
__lowercase : List[str] = attention_dropout
__lowercase : List[str] = max_position_embeddings
__lowercase : int = initializer_range
__lowercase : Dict = None
__lowercase : List[Any] = 0
__lowercase : List[str] = 2
__lowercase : int = 1
def _lowerCamelCase ( self ) -> Union[str, Any]:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowerCamelCase ( self ) -> int:
__lowercase : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__lowercase : Optional[int] = None
if self.use_input_mask:
__lowercase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : List[Any] = self.get_config()
__lowercase : Any = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCamelCase ( self ) -> Dict:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase_ , )
def _lowerCamelCase ( self ) -> int:
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) : List[Any] = config_and_inputs
__lowercase : List[Any] = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase =(
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase =False
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = TFXGLMModelTester(self )
__lowercase : int = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def _lowerCamelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
@slow
def _lowerCamelCase ( self ) -> int:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[Any] = TFXGLMModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _lowerCamelCase ( self ) -> Any:
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self , UpperCamelCase_=True ) -> Any:
__lowercase : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__lowercase : Any = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowercase : List[str] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
__lowercase : str = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : int = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__lowercase : Any = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
__lowercase : List[Any] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
__lowercase : str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
__lowercase : int = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ , seed=[7, 0] )
__lowercase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : str = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__lowercase : Optional[int] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__lowercase : Optional[Any] = '''left'''
# use different length sentences to test batching
__lowercase : Dict = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
__lowercase : int = tokenizer(UpperCamelCase_ , return_tensors='''tf''' , padding=UpperCamelCase_ )
__lowercase : Tuple = inputs['''input_ids''']
__lowercase : Optional[int] = model.generate(input_ids=UpperCamelCase_ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
__lowercase : int = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowercase : Any = model.generate(input_ids=UpperCamelCase_ , max_new_tokens=12 )
__lowercase : List[str] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowercase : Union[str, Any] = model.generate(input_ids=UpperCamelCase_ , max_new_tokens=12 )
__lowercase : Any = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowercase : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : int = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase_ )
__lowercase : Union[str, Any] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [non_padded_sentence, padded_sentence] )
| 249 |
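# --- Editor's note (not part of the original file) ---
# The batched-generation test above depends on LEFT padding: for a
# decoder-only model, right padding would insert pad tokens between the
# prompt and the newly generated tokens. Minimal sketch (checkpoint name
# as in the test):
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/xglm-564M")
tok.padding_side = "left"
batch = tok(["a much longer prompt than the other one", "short"],
            return_tensors="pt", padding=True)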
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> List[Any]:
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Optional[Any]:
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Tuple:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_call_pil( self ) -> Optional[int]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_numpy( self ) -> List[Any]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    def test_call_pytorch( self ) -> Tuple:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
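# Usage sketch (added for illustration; mirrors what the tests above exercise
# and assumes torch and PIL are available). The image processor resizes and
# normalizes a PIL image into a pixel_values tensor of shape
# (batch, channels, height, width).
def _dpt_image_processor_demo() -> None:
    image_processing = DPTImageProcessor(size={'''height''': 18, '''width''': 18} )
    dummy_image = Image.new('''RGB''' , (30, 40) )
    pixel_values = image_processing(dummy_image , return_tensors='''pt''' ).pixel_values
    assert tuple(pixel_values.shape ) == (1, 3, 18, 18)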
| 249 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( input_str: str ):
    '''Return True if every character in ``input_str`` occurs at most once.'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on the bit for the current character's unicode
        # value, the character is a duplicate.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
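# Standalone sanity check (added for illustration): bit k of ``bitmap`` records
# whether the character with code point k has been seen, so a repeated
# character is caught in O(1) per character.
def _bitmap_demo() -> None:
    assert _lowerCAmelCase('''abc''' ) is True  # all characters unique
    assert _lowerCAmelCase('''aba''' ) is False  # 'a' repeats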
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime( number: int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums( n: int ):
    '''simple docstring'''
    if not isinstance(n , int ):
        raise ValueError('''n must be an integer''' )
    if n <= 0:
        raise ValueError('''n must be >= 0''' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
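# Worked example (added for illustration): 9 = 7 + 2 * 1**2 satisfies Goldbach's
# other conjecture, so it is skipped; 5777 is the first odd composite with no
# such decomposition (the Project Euler 46 answer).
def _goldbach_other_demo() -> None:
    assert compute_nums(1 ) == [5777]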
def solution( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 157 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowercase : Dict =get_logger(__name__)
_lowercase : Optional[Any] =R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class snake_case__ :
"""simple docstring"""
@add_start_docstrings(__lowercase )
def __call__( self , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case__ :
"""simple docstring"""
@add_start_docstrings(__lowercase )
def __call__( self , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class snake_case__ (lowercase__ ):
"""simple docstring"""
@add_start_docstrings(__lowercase )
def __call__( self , __lowercase , __lowercase , __lowercase , **__lowercase ) -> Dict:
"""simple docstring"""
for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
a__ : Tuple = processor(__lowercase , __lowercase , __lowercase , **__lowercase )
else:
a__ : Optional[Any] = processor(__lowercase , __lowercase , __lowercase )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
a__ : Any = temperature
def __call__( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
"""simple docstring"""
a__ : int = scores / self.temperature
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase = -float("""Inf""" ) , __lowercase = 1 ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(__lowercase , __lowercase ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
a__ : Optional[int] = top_p
a__ : Optional[int] = filter_value
a__ : Tuple = min_tokens_to_keep
def __call__( self , __lowercase , __lowercase , __lowercase ) -> str:
"""simple docstring"""
a__ : List[str] = lax.top_k(__lowercase , scores.shape[-1] )
a__ : Union[str, Any] = jnp.full_like(__lowercase , self.filter_value )
a__ : Optional[int] = jax.nn.softmax(__lowercase , axis=-1 ).cumsum(axis=-1 )
a__ : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
a__ : Optional[Any] = jnp.roll(__lowercase , 1 )
score_mask |= score_mask.at[:, 0].set(__lowercase )
# min tokens to keep
a__ : Any = score_mask.at[:, : self.min_tokens_to_keep].set(__lowercase )
a__ : Union[str, Any] = jnp.where(__lowercase , __lowercase , __lowercase )
a__ : int = jax.lax.sort_key_val(__lowercase , __lowercase )[-1]
return next_scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase = -float("""Inf""" ) , __lowercase = 1 ) -> int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
a__ : List[Any] = max(__lowercase , __lowercase )
a__ : Tuple = filter_value
def __call__( self , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : int = scores.shape
a__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value )
a__ : List[Any] = min(self.top_k , scores.shape[-1] ) # Safety check
a__ : Union[str, Any] = lax.top_k(__lowercase , __lowercase )
a__ : Union[str, Any] = jnp.broadcast_to((jnp.arange(__lowercase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
a__ : Tuple = topk_scores.flatten()
a__ : List[Any] = topk_indices.flatten() + shift
a__ : List[str] = next_scores_flat.at[topk_indices_flat].set(__lowercase )
a__ : int = next_scores_flat.reshape(__lowercase , __lowercase )
return next_scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : Optional[int] = bos_token_id
def __call__( self , __lowercase , __lowercase , __lowercase ) -> Tuple:
"""simple docstring"""
a__ : str = jnp.full(scores.shape , -float("""inf""" ) )
a__ : List[Any] = 1 - jnp.bool_(cur_len - 1 )
a__ : Dict = jnp.where(__lowercase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowercase )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = max_length
a__ : List[str] = eos_token_id
def __call__( self , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = jnp.full(scores.shape , -float("""inf""" ) )
a__ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
a__ : str = jnp.where(__lowercase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowercase )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(__lowercase , __lowercase ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
a__ : Optional[Any] = min_length
a__ : List[str] = eos_token_id
def __call__( self , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
a__ : Tuple = jnp.where(__lowercase , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , __lowercase )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> str:
"""simple docstring"""
a__ : Union[str, Any] = list(__lowercase )
a__ : List[str] = begin_index
def __call__( self , __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
a__ : Union[str, Any] = 1 - jnp.bool_(cur_len - self.begin_index )
a__ : str = jnp.where(__lowercase , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , __lowercase )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Any:
"""simple docstring"""
a__ : List[Any] = list(__lowercase )
def __call__( self , __lowercase , __lowercase , __lowercase ) -> Optional[int]:
"""simple docstring"""
a__ : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
    def __init__( self , force_token_map ) -> Tuple:
        """simple docstring"""
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indices without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
def __call__( self , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
def _force_token(__lowercase ):
a__ : Optional[int] = scores.shape[0]
a__ : Optional[int] = self.force_token_array[generation_idx]
a__ : Tuple = jnp.ones_like(__lowercase , dtype=scores.dtype ) * -float("""inf""" )
a__ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
a__ : Optional[int] = lax.dynamic_update_slice(__lowercase , __lowercase , (0, current_token) )
return new_scores
a__ : Union[str, Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowercase ) , lambda: scores , ) , )
return scores
class snake_case__ (lowercase__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
a__ : Any = generate_config.eos_token_id
a__ : int = generate_config.no_timestamps_token_id
a__ : List[str] = generate_config.no_timestamps_token_id + 1
a__ : str = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__lowercase , """max_initial_timestamp_index""" ):
a__ : str = generate_config.max_initial_timestamp_index
else:
a__ : List[Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
a__ : Optional[Any] = model_config.vocab_size
def __call__( self , __lowercase , __lowercase , __lowercase ) -> str:
"""simple docstring"""
a__ : Tuple = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(__lowercase , __lowercase ):
a__ : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , __lowercase , __lowercase )
a__ : Dict = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowercase , )
a__ : Any = jnp.where((cur_len - self.begin_index) < 2 , __lowercase , __lowercase )
a__ : Union[str, Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowercase , __lowercase , )
return jnp.where(
__lowercase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , __lowercase , )
a__ : Dict = jax.vmap(__lowercase )(__lowercase , __lowercase )
a__ : Optional[int] = jnp.where(cur_len == self.begin_index , __lowercase , __lowercase )
a__ : Optional[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowercase , )
a__ : Optional[int] = self.timestamp_begin + self.max_initial_timestamp_index
a__ : Tuple = jnp.where(
__lowercase , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , __lowercase , )
# if sum of probability over timestamps is above any other token, sample timestamp
a__ : List[Any] = jax.nn.log_softmax(__lowercase , axis=-1 )
def handle_cumulative_probs(__lowercase , __lowercase ):
a__ : List[str] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
a__ : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , __lowercase , )
a__ : Union[str, Any] = jax.vmap(__lowercase )(__lowercase , __lowercase )
return scores
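# Tiny demonstration (added for illustration; independent of the classes above):
# temperature scaling divides the logits, so T > 1 flattens the softmax
# distribution and T < 1 sharpens it.
def _temperature_demo() -> None:
    scores = jnp.array([[1.0, 2.0, 3.0]] )
    flat = jax.nn.softmax(scores / 2.0 , axis=-1 )   # T = 2.0: flatter
    sharp = jax.nn.softmax(scores / 0.5 , axis=-1 )  # T = 0.5: sharper
    assert float(flat.max() ) < float(sharp.max() )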
| 170 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
    fig = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
return timestamp
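# Usage sketch (added for illustration): the freeze helper above simply flips
# requires_grad off for every parameter of a module.
def _freeze_demo() -> None:
    layer = torch.nn.Linear(2 , 2 )
    for param in layer.parameters():
        param.requires_grad = False
    assert all(not p.requires_grad for p in layer.parameters() )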
| 311 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCAmelCase : int = 1_0000
__UpperCAmelCase : Optional[List[str]] = None
__UpperCAmelCase : Optional[datasets.Features] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ParquetConfig
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self ,dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files ,(str, list, tuple) ):
            files = data_files
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files ,str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file ,'rb' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name ,gen_kwargs={'files': files} ) )
return splits
    def _cast_table( self ,pa_table: pa.Table ):
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table ,self.info.features.arrow_schema )
return pa_table
    def _generate_tables( self ,files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file ,'rb' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
raise
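# Minimal round-trip sketch (added for illustration): the builder above streams
# record batches out of each Parquet file via ``pq.ParquetFile.iter_batches``.
def _parquet_batch_demo() -> None:
    import io
    buffer = io.BytesIO()
    pq.write_table(pa.table({'a': [1, 2, 3]} ) , buffer )
    buffer.seek(0 )
    parquet_file = pq.ParquetFile(buffer )
    tables = [pa.Table.from_batches([batch] ) for batch in parquet_file.iter_batches(batch_size=2 )]
    assert sum(t.num_rows for t in tables ) == 3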
| 354 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock (tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / 'foo.lock' ) )
    lockb = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path (tmpdir ):
    """simple docstring"""
    filename = 'a' * 1000 + '.lock'
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 5 | 0 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
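# Sanity check (added for illustration): the sort mutates the list in place and
# returns the number of element comparisons performed by the partition steps.
def _quick_sort_demo() -> None:
    data = [3, 1, 2]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 3] and comparisons >= 2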
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z) | 160 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def _lowerCamelCase ( self ):
print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def _lowerCamelCase ( self ):
print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def _lowerCamelCase ( self ):
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def _lowerCamelCase ( self ):
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 160 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree( tree: TreeNode | None ) -> bool:
    """simple docstring"""
    def is_valid_tree( node: TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(tree ):
        raise ValueError(
            """Each node should be type of TreeNode and data should be float.""" )
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None , left_bound: float , right_bound: float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(tree , -float("""inf""" ) , float("""inf""" ) )
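# Quick illustrative check (added; uses the names reconstructed above):
def _bst_demo() -> None:
    assert is_binary_search_tree(TreeNode(2.0 , TreeNode(1.0 ) , TreeNode(3.0 ) ) ) is True
    assert is_binary_search_tree(TreeNode(2.0 , TreeNode(3.0 ) , TreeNode(1.0 ) ) ) is False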
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self ) -> Union[str, Any]:
        """simple docstring"""
        model = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
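# Note on the tolerance above (added for illustration): np.allclose accepts
# elementwise differences up to about ``atol``, so tiny numerical drift across
# TF versions or hardware does not fail the test.
def _allclose_demo() -> None:
    reference = np.array([0.0681762, 0.10894451] )
    assert np.allclose(reference , reference + 5e-5 , atol=1e-4 )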
| 146 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_args( )-> int:
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
return args.f
def get_results( output_dir )-> Tuple:
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
return results
def is_cuda_and_apex_available( )-> int:
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
    def tearDownClass( cls ) -> List[str]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Optional[int]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''' ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : str = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCAmelCase_ : List[Any] = get_results(__lowercase )
self.assertLess(result['''perplexity'''] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[str] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Union[str, Any] = get_results(__lowercase )
self.assertLess(result['''perplexity'''] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCAmelCase_ : Any = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Tuple = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : int = get_results(__lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Dict = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Optional[int] = get_results(__lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 2_8 )
self.assertGreaterEqual(result['''eval_exact'''] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Any = get_results(__lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : int = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Any = get_results(__lowercase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : List[str] = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Optional[Any] = get_results(__lowercase )
self.assertGreaterEqual(result['''eval_bleu'''] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''translation_no_trainer''' ) ) )
@slow
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Dict = logging.StreamHandler(sys.stdout )
logger.addHandler(__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Dict = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Any = get_results(__lowercase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Any = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Union[str, Any] = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
lowerCAmelCase_ : Optional[Any] = get_results(__lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , '''image_classification_no_trainer''' ) ) ) | 262 |
from math import sqrt
def is_prime( number )-> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
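# Small usage check (added for illustration) for the trial-division primality
# test above:
def _is_prime_demo() -> None:
    assert [n for n in range(2 , 12 ) if is_prime(n )] == [2, 3, 5, 7, 11]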
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase_ : Tuple = list(range(2 , n + 1 ) )
lowerCAmelCase_ : Optional[int] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ : str = 0
# filters actual prime numbers.
lowerCAmelCase_ : Optional[int] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ : List[Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase_ ):
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase_ : int = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase_ : List[Any] = 2
lowerCAmelCase_ : Optional[int] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase_ ):
while quotient != 1:
if is_prime(lowerCAmelCase_ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type list"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : Dict = 0
# prime factorization of 'number'
lowerCAmelCase_ : Any = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = max(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ : List[Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ : Dict = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : int = min(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "'ans' must been from type int"
return ans
def is_even( number )-> Optional[Any]:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
    return number % 2 == 0
def is_odd( number )-> List[Any]:
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
    return number % 2 != 0
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (number > 2) and is_even(lowerCAmelCase_ )
), "'number' must been an int, even and > 2"
lowerCAmelCase_ : str = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase_ : int = get_prime_numbers(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = len(lowerCAmelCase_ )
# run variable for while-loops.
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : Tuple = None
# exit variable. for break up the loops
lowerCAmelCase_ : int = True
while i < len_pn and loop:
lowerCAmelCase_ : int = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase_ : Tuple = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (len(lowerCAmelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd( number1 , number2 )-> Dict:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
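# Worked example (added for illustration): Euclid's algorithm above repeatedly
# replaces (a, b) with (b, a % b), e.g. (24, 36) -> (36, 24) -> (24, 12) -> (12, 0).
def _gcd_demo() -> None:
    assert gcd(24 , 36 ) == 12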
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ : List[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
lowerCAmelCase_ : int = prime_factorization(lowerCAmelCase_ )
elif numbera == 1 or numbera == 1:
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Union[str, Any] = max(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Dict = 0
    done = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase_ : Optional[Any] = prime_fac_a.count(lowerCAmelCase_ )
lowerCAmelCase_ : Tuple = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(max(lowerCAmelCase_ , lowerCAmelCase_ ) ):
ans *= n
else:
lowerCAmelCase_ : List[str] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase_ : Optional[Any] = prime_fac_a.count(lowerCAmelCase_ )
for _ in range(lowerCAmelCase_ ):
ans *= n
done.append(lowerCAmelCase_ )
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and is_prime(
lowerCAmelCase_ ), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
assert (
is_prime(lowerCAmelCase_ ) and is_prime(lowerCAmelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase_ : Union[str, Any] = p_number_a + 1 # jump to the next number
lowerCAmelCase_ : Optional[int] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase_ : List[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> List[Any]:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase_ : Union[str, Any] = get_divisors(lowerCAmelCase_ )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase_ : Optional[Any] = gcd(abs(lowerCAmelCase_ ) , abs(lowerCAmelCase_ ) )
# precondition
assert (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase_ : Any = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Union[str, Any] = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase_ : Union[str, Any] = ans
ans += fiba
lowerCAmelCase_ : Optional[Any] = tmp
return ans | 262 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
UpperCAmelCase_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
UpperCAmelCase_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )
    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 247 |
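The metric class above is a thin wrapper over NLTK. A minimal sketch of the same corpus-level GLEU computation called directly, assuming `nltk` is installed and reusing one hypothesis/reference pair from the docstring examples:

# Minimal sketch: corpus-level Google-BLEU (GLEU) straight from NLTK,
# without the datasets.Metric wrapper. Assumes nltk is installed.
from nltk.translate import gleu_score

hyp = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
ref = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]

score = gleu_score.corpus_gleu(
    list_of_references=[[ref]],  # one list of reference token lists per hypothesis
    hypotheses=[hyp],
    min_len=1,  # smallest n-gram order counted
    max_len=4,  # largest n-gram order counted
)
print(round(score, 2))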
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
| 247 | 1 |
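The interesting part of the forward pass above is the split-and-mix pattern: each transformer sees only its own slice of the conditioning tokens, only the residual (output minus input) is kept, and the two residuals are blended with `mix_ratio` before the input is added back. A self-contained sketch of just that logic; the one-line encoders here are hypothetical stand-ins for the full transformer blocks:

# Self-contained sketch of the split-and-mix logic above. The encoders are
# hypothetical stand-ins taking (hidden, condition) instead of full transformers.
import torch

def mix_dual_outputs(hidden_states, encoder_hidden_states, encoders,
                     condition_lengths=(77, 257), order=(1, 0), mix_ratio=0.5):
    input_states = hidden_states
    encoded_states = []
    start = 0
    for i in range(2):
        cond = encoder_hidden_states[:, start : start + condition_lengths[i]]
        out = encoders[order[i]](input_states, cond)  # the real model cross-attends to cond
        encoded_states.append(out - input_states)     # keep only the residual
        start += condition_lengths[i]
    mixed = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
    return mixed + input_states

enc = lambda h, c: h + c.mean(dim=1, keepdim=True).expand_as(h)  # toy stand-in encoder
h = torch.randn(2, 16, 8)              # (batch, tokens, features)
ctx = torch.randn(2, 77 + 257, 8)      # concatenated condition tokens
print(mix_dual_outputs(h, ctx, [enc, enc]).shape)  # torch.Size([2, 16, 8])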
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Dict = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __SCREAMING_SNAKE_CASE, unittest.TestCase ):
UpperCAmelCase__ = AlbertTokenizer
UpperCAmelCase__ = AlbertTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def A_ ( self : Union[str, Any] ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : int = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Tuple , UpperCAmelCase : List[str] ) -> Dict:
lowerCamelCase__ : Optional[int] = 'this is a test'
lowerCamelCase__ : Tuple = 'this is a test'
return input_text, output_text
def A_ ( self : List[Any] ) -> Dict:
lowerCamelCase__ : Optional[int] = '<pad>'
lowerCamelCase__ : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 30000 )
def A_ ( self : str ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def A_ ( self : str ) -> str:
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Tuple = self.get_rust_tokenizer()
lowerCamelCase__ : Union[str, Any] = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : Dict = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase__ : Tuple = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : Optional[int] = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase__ : Optional[int] = tokenizer.encode(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Any ) -> Optional[int]:
lowerCamelCase__ : List[Any] = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
lowerCamelCase__ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowerCamelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def A_ ( self : List[str] ) -> Optional[Any]:
lowerCamelCase__ : str = AlbertTokenizer(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = tokenizer.encode('sequence builders' )
lowerCamelCase__ : List[Any] = tokenizer.encode('multi-sequence build' )
lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def A_ ( self : int ) -> List[str]:
# fmt: off
lowerCamelCase__ : Tuple = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 50 |
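Outside the test harness, the round-trip the suite exercises (tokenize, map to ids, build inputs with special tokens) takes only a few lines. A hedged sketch using the public checkpoint instead of the local SentencePiece fixture:

# Hedged usage sketch of the round-trip the tests above cover.
# Assumes network access to download the public albert-base-v2 checkpoint.
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
tokens = tok.tokenize("This is a test")        # SentencePiece pieces such as "▁this"
ids = tok.convert_tokens_to_ids(tokens)
encoded = tok.encode("sequence builders")      # wraps the ids in [CLS] ... [SEP]
assert encoded[0] == tok.cls_token_id and encoded[-1] == tok.sep_token_id
print(tokens, ids)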
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated by Heap's recursive algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 15 | 0 |
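Heap's algorithm also has a non-recursive formulation driven by a per-position counter array; a sketch for contrast with the recursive version above:

# Iterative form of Heap's algorithm, for contrast with the recursive version.
def heaps_iterative(arr: list) -> list:
    res = [tuple(arr)]
    c = [0] * len(arr)  # per-position swap counters
    i = 0
    while i < len(arr):
        if c[i] < i:
            if i % 2 == 0:
                arr[0], arr[i] = arr[i], arr[0]
            else:
                arr[c[i]], arr[i] = arr[i], arr[c[i]]
            res.append(tuple(arr))
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
    return res

print(len(heaps_iterative([1, 2, 3])))  # 6 permutations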
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number whose base,exponent pair gives the largest base**exponent."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
    print(solution())
| 140 |
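The log trick is what makes this tractable: computing base**exponent directly would produce million-digit integers, but log10 is monotonic, so comparing exponent * log10(base) preserves the ordering. A short illustration on two sample pairs (illustrative values, not the puzzle's data file):

# Why the comparison works: log10 is monotonic, so exp*log10(base) orders base**exp.
from math import log10

pairs = [(632382, 518061), (519432, 525806)]  # illustrative (base, exponent) rows
scores = [exp * log10(base) for base, exp in pairs]
print(scores.index(max(scores)) + 1)          # 1-based index of the larger power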
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase : Any = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase : str = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def A_ ( _lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase : Tuple = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_lowerCAmelCase )[0]
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase ) -> int:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : Dict = _readaa(_lowerCAmelCase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
UpperCamelCase : Optional[int] = _readaa(_lowerCAmelCase )
UpperCamelCase : int = _readaa(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(rows * cols * num_images )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
UpperCamelCase : Optional[Any] = data.reshape(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
return data
@deprecated(_lowerCAmelCase , "Please use tf.one_hot on tensors." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
UpperCamelCase : List[str] = labels_dense.shape[0]
UpperCamelCase : str = numpy.arange(_lowerCAmelCase ) * num_classes
UpperCamelCase : Optional[Any] = numpy.zeros((num_labels, num_classes) )
UpperCamelCase : Dict = 1
return labels_one_hot
@deprecated(_lowerCAmelCase , "Please use tf.data to implement this functionality." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=10 ) -> str:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
UpperCamelCase : int = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
UpperCamelCase : List[str] = _readaa(_lowerCAmelCase )
UpperCamelCase : List[Any] = bytestream.read(_lowerCAmelCase )
UpperCamelCase : List[str] = numpy.frombuffer(_lowerCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase , _lowerCAmelCase )
return labels
class A__ :
@deprecated(
A_ , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : int = random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCamelCase : Optional[Any] = dtypes.as_dtype(A_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
UpperCamelCase : List[str] = 1_0000
UpperCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
UpperCamelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCamelCase : int = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCamelCase : str = images.astype(numpy.floataa )
UpperCamelCase : str = numpy.multiply(A_ , 1.0 / 2_55.0 )
UpperCamelCase : Optional[int] = images
UpperCamelCase : str = labels
UpperCamelCase : Optional[Any] = 0
UpperCamelCase : Optional[int] = 0
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._images
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._labels
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._num_examples
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self._epochs_completed
def __UpperCamelCase( self , A_ , A_=False , A_=True ):
'''simple docstring'''
if fake_data:
UpperCamelCase : Optional[int] = [1] * 784
UpperCamelCase : Optional[Any] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
UpperCamelCase : Optional[Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCamelCase : Optional[Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : int = self.images[perma]
UpperCamelCase : Any = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
UpperCamelCase : List[Any] = self._num_examples - start
UpperCamelCase : Union[str, Any] = self._images[start : self._num_examples]
UpperCamelCase : str = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
UpperCamelCase : Union[str, Any] = self.images[perm]
UpperCamelCase : Union[str, Any] = self.labels[perm]
# Start next epoch
UpperCamelCase : Tuple = 0
UpperCamelCase : Tuple = batch_size - rest_num_examples
UpperCamelCase : List[str] = self._index_in_epoch
UpperCamelCase : Dict = self._images[start:end]
UpperCamelCase : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCamelCase : Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCAmelCase , "Please write your own downloading logic." )
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase , _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
UpperCamelCase : Optional[int] = f.size()
print("Successfully downloaded" , _lowerCAmelCase , _lowerCAmelCase , "bytes." )
return filepath
@deprecated(
_lowerCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=dtypes.floataa , _lowerCAmelCase=True , _lowerCAmelCase=5000 , _lowerCAmelCase=None , _lowerCAmelCase=DEFAULT_SOURCE_URL , ) -> List[str]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_lowerCAmelCase , one_hot=_lowerCAmelCase , dtype=_lowerCAmelCase , seed=_lowerCAmelCase )
UpperCamelCase : Any = fake()
UpperCamelCase : List[str] = fake()
UpperCamelCase : Union[str, Any] = fake()
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
if not source_url: # empty string check
UpperCamelCase : str = DEFAULT_SOURCE_URL
UpperCamelCase : List[str] = "train-images-idx3-ubyte.gz"
UpperCamelCase : Optional[int] = "train-labels-idx1-ubyte.gz"
UpperCamelCase : List[str] = "t10k-images-idx3-ubyte.gz"
UpperCamelCase : Union[str, Any] = "t10k-labels-idx1-ubyte.gz"
UpperCamelCase : Optional[int] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[str] = _extract_images(_lowerCAmelCase )
UpperCamelCase : Dict = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + train_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : List[Any] = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
UpperCamelCase : Any = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_images_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : Any = _extract_images(_lowerCAmelCase )
UpperCamelCase : List[str] = _maybe_download(
_lowerCAmelCase , _lowerCAmelCase , source_url + test_labels_file )
with gfile.Open(_lowerCAmelCase , "rb" ) as f:
UpperCamelCase : str = _extract_labels(_lowerCAmelCase , one_hot=_lowerCAmelCase )
if not 0 <= validation_size <= len(_lowerCAmelCase ):
UpperCamelCase : Any = (
"Validation size should be between 0 and "
F"""{len(_lowerCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(_lowerCAmelCase )
UpperCamelCase : str = train_images[:validation_size]
UpperCamelCase : int = train_labels[:validation_size]
UpperCamelCase : List[str] = train_images[validation_size:]
UpperCamelCase : Union[str, Any] = train_labels[validation_size:]
UpperCamelCase : List[str] = {"dtype": dtype, "reshape": reshape, "seed": seed}
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : List[str] = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCamelCase : Any = _DataSet(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
return _Datasets(train=_lowerCAmelCase , validation=_lowerCAmelCase , test=_lowerCAmelCase )
| 140 | 1 |
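The subtle piece of `next_batch` above is the epoch boundary, where the tail of the finished epoch is spliced onto the head of a freshly reshuffled one. A self-contained sketch of that logic on a plain NumPy array:

# Self-contained sketch of the epoch-boundary splice in next_batch above.
import numpy as np

class MiniBatcher:
    def __init__(self, data):
        self.data = data
        self.pos = 0
        self.epochs_completed = 0

    def next_batch(self, batch_size):
        n = len(self.data)
        if self.pos + batch_size > n:              # crossing an epoch boundary
            rest = self.data[self.pos :].copy()    # tail of the finished epoch
            np.random.shuffle(self.data)           # reshuffle for the new epoch
            self.epochs_completed += 1
            self.pos = batch_size - len(rest)
            return np.concatenate((rest, self.data[: self.pos]), axis=0)
        start, self.pos = self.pos, self.pos + batch_size
        return self.data[start : self.pos]

b = MiniBatcher(np.arange(10))
print(b.next_batch(4), b.next_batch(4), b.next_batch(4))  # third call splices epochs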
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
snake_case_ = True
from torch.cuda.amp import autocast
snake_case_ = logging.getLogger(__name__)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__lowerCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__lowerCamelCase : Optional[bool] = field(
default=__snake_case , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__lowerCamelCase : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} )
__lowerCamelCase : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} )
__lowerCamelCase : Optional[float] = field(
default=0.1 , metadata={
"""help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."""
} , )
__lowerCamelCase : Optional[float] = field(
default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , )
__lowerCamelCase : Optional[float] = field(
default=0.05 , metadata={
"""help""": (
"""Propability of each feature vector along the time axis to be chosen as the start of the vector"""
"""span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"""
"""vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."""
)
} , )
__lowerCamelCase : Optional[float] = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__lowerCamelCase : Optional[str] = field(
default=__snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowerCamelCase : Optional[str] = field(
default="""train+validation""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__lowerCamelCase : bool = field(
default=__snake_case , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__lowerCamelCase : Optional[int] = field(
default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__lowerCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowerCamelCase : Optional[int] = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of validation examples to this """
"""value if set."""
)
} , )
__lowerCamelCase : List[str] = list_field(
default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__lowerCamelCase : WavaVecaProcessor
__lowerCamelCase : Union[bool, str] = True
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
def __call__( self , a):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
lowercase__ : List[str] = [{'input_values': feature['input_values']} for feature in features]
lowercase__ : Any = [{'input_ids': feature['labels']} for feature in features]
lowercase__ : Tuple = self.processor.pad(
a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowercase__ : List[str] = self.processor.pad(
labels=a , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
lowercase__ : Optional[int] = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1) , -100)
lowercase__ : List[str] = labels
return batch
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def snake_case_ ( self , a , a):
model.train()
lowercase__ : List[Any] = self._prepare_inputs(a)
if self.use_amp:
with autocast():
lowercase__ : Any = self.compute_loss(a , a)
else:
lowercase__ : Union[str, Any] = self.compute_loss(a , a)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
lowercase__ : Union[str, Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase__ : List[Any] = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""")
if self.args.gradient_accumulation_steps > 1:
lowercase__ : Optional[int] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(a).backward()
elif self.use_apex:
with amp.scale_loss(a , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(a)
else:
loss.backward()
return loss.detach()
def snake_case__ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
lowercase__ : Optional[int] = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
lowercase__ : Optional[Any] = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
lowercase__ : Optional[int] = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(SCREAMING_SNAKE_CASE_ : str ):
lowercase__ : Union[str, Any] = re.sub(SCREAMING_SNAKE_CASE_ , '' , batch['sentence'] ).lower() + ' '
return batch
lowercase__ : str = train_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=['sentence'] )
lowercase__ : Any = eval_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=['sentence'] )
def extract_all_chars(SCREAMING_SNAKE_CASE_ : Tuple ):
lowercase__ : List[Any] = ' '.join(batch['text'] )
lowercase__ : Optional[int] = list(set(SCREAMING_SNAKE_CASE_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
lowercase__ : Union[str, Any] = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , )
lowercase__ : Optional[Any] = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , )
lowercase__ : Optional[Any] = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
lowercase__ : List[Any] = {v: k for k, v in enumerate(SCREAMING_SNAKE_CASE_ )}
lowercase__ : List[str] = vocab_dict[' ']
del vocab_dict[" "]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE_ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
lowercase__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ )
lowercase__ : int = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
lowercase__ : int = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
lowercase__ : Optional[int] = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
if data_args.max_val_samples is not None:
lowercase__ : List[Any] = eval_dataset.select(range(data_args.max_val_samples ) )
lowercase__ : str = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(SCREAMING_SNAKE_CASE_ : str ):
lowercase__ , lowercase__ : List[str] = torchaudio.load(batch['path'] )
lowercase__ : Optional[int] = resampler(SCREAMING_SNAKE_CASE_ ).squeeze().numpy()
lowercase__ : Dict = 16_000
lowercase__ : Union[str, Any] = batch['text']
return batch
lowercase__ : Optional[int] = train_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
lowercase__ : Union[str, Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
lowercase__ : Dict = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(SCREAMING_SNAKE_CASE_ )
return batch
lowercase__ : int = train_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , )
lowercase__ : Union[str, Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
lowercase__ : str = datasets.load_metric('wer' )
def compute_metrics(SCREAMING_SNAKE_CASE_ : str ):
lowercase__ : List[str] = pred.predictions
lowercase__ : Tuple = np.argmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
lowercase__ : str = processor.tokenizer.pad_token_id
lowercase__ : Dict = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
# we do not want to group tokens when computing the metrics
lowercase__ : Dict = processor.batch_decode(pred.label_ids , group_tokens=SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[int] = wer_metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
lowercase__ : Tuple = DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# Initialize our Trainer
lowercase__ : Tuple = CTCTrainer(
model=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase__ : Optional[int] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowercase__ : Tuple = model_args.model_name_or_path
else:
lowercase__ : Union[str, Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowercase__ : str = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
lowercase__ : Optional[Any] = train_result.metrics
lowercase__ : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
lowercase__ : Optional[Any] = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics('train' , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics('train' , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
lowercase__ : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ : Any = trainer.evaluate()
lowercase__ : Union[str, Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE_ )
return results
if __name__ == "__main__":
main()
| 214 |
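One non-obvious move in the data collator above is overwriting padded label positions with -100, the index that PyTorch's losses ignore. A minimal stand-alone illustration of that masking (the pad id of 0 is an assumption for the demo):

# Minimal illustration of the -100 label masking done in the collator above.
import torch

labels = torch.tensor([[5, 9, 2, 0, 0], [7, 1, 4, 3, 8]])        # 0 = pad id (assumption)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
masked = labels.masked_fill(attention_mask.ne(1), -100)
print(masked)  # padded positions become -100 and are skipped by the loss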
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class SCREAMING_SNAKE_CASE__ :
def __init__( self , a , a=13 , a=7 , a=False , a=True , a=False , a=False , a=19 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ):
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : Union[str, Any] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : Tuple = use_input_mask
lowercase__ : List[str] = use_token_type_ids
lowercase__ : Optional[Any] = use_labels
lowercase__ : List[str] = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : int = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Any = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : str = initializer_range
lowercase__ : List[str] = num_labels
lowercase__ : Union[str, Any] = num_choices
lowercase__ : Optional[int] = scope
def snake_case_ ( self):
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ : List[Any] = None
if self.use_input_mask:
lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ : int = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ : str = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self):
lowercase__ : str = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def snake_case_ ( self , a , a , a , a , a , a):
lowercase__ : Dict = EsmForProteinFolding(config=a).float()
model.to(a)
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a)
lowercase__ : Dict = model(a)
lowercase__ : int = model(a)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
def snake_case_ ( self):
lowercase__ : List[str] = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : int = config_and_inputs
lowercase__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , unittest.TestCase ):
__lowerCamelCase : Dict = False
__lowerCamelCase : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
__lowerCamelCase : Union[str, Any] = ()
__lowerCamelCase : List[Any] = {} if is_torch_available() else {}
__lowerCamelCase : Optional[Any] = False
def snake_case_ ( self):
lowercase__ : Tuple = EsmFoldModelTester(self)
lowercase__ : List[Any] = ConfigTester(self , config_class=a , hidden_size=37)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
@unittest.skip('Does not support attention outputs')
def snake_case_ ( self):
pass
@unittest.skip
def snake_case_ ( self):
pass
@unittest.skip('Esm does not support embedding resizing')
def snake_case_ ( self):
pass
@unittest.skip('Esm does not support embedding resizing')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support passing input embeds!')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.')
def snake_case_ ( self):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold only has one output format.')
def snake_case_ ( self):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support input chunking.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.')
def snake_case_ ( self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def snake_case_ ( self):
pass
@require_torch
class SCREAMING_SNAKE_CASE__ (__snake_case ):
@slow
def snake_case_ ( self):
lowercase__ : Dict = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
model.eval()
lowercase__ : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
lowercase__ : Optional[int] = model(a)['positions']
lowercase__ : Dict = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa)
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , a , atol=1e-4))
| 214 | 1 |
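Outside the test harness, the slow integration check boils down to a short inference recipe. A hedged sketch: downloading `facebook/esmfold_v1` requires network access and several gigabytes of memory, and the tensor below is the tokenized protein sequence used in the test:

# Hedged inference sketch matching the slow test above. Assumes network access
# and enough memory to hold the facebook/esmfold_v1 checkpoint.
import torch
from transformers import EsmForProteinFolding

model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])  # tokenized sequence
with torch.no_grad():
    positions = model(input_ids)["positions"]  # (8, batch, seq_len, 14, 3) atom coordinates
print(positions[0, 0, 0, 0])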
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Walk inward from both ends, comparing characters pairwise."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    """Compare s[i] with s[n - i - 1] for the first half of the string."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    """Peel off the outer characters and recurse on the middle."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    """Compare the string with its reversed slice."""
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    """Time `name` against every entry in test_data via timeit."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 350 |
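The benchmarking pattern used here, a statement string plus a setup string that imports names from `__main__`, generalizes to any set of competing implementations. A compact sketch with two hypothetical functions:

# The same timeit pattern applied to two competing implementations.
from timeit import timeit

def sum_loop(n: int) -> int:
    total = 0
    for i in range(n):
        total += i
    return total

def sum_builtin(n: int) -> int:
    return sum(range(n))

for name in ("sum_loop", "sum_builtin"):
    t = timeit(stmt=f"{name}(1000)", setup=f"from __main__ import {name}", number=10_000)
    print(f"{name:<12} finished 10,000 runs in {t:.5f} seconds")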
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 250 | 0 |
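Configs like this one are consumed by the matching model class. A hedged usage sketch, assuming the installed `transformers` version exposes `ViTMSNConfig` and `ViTMSNModel` (recent versions do):

# Hedged usage sketch: build a randomly initialized model from a custom config.
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
model = ViTMSNModel(config)      # no pretrained weights are downloaded
print(model.config.hidden_size)  # 384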
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all (not necessarily contiguous) subsequences of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 267 |
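Since the function targets subsequences rather than contiguous subarrays, the recurrence `ans = max(ans, ans + num, num)` is equivalent to simply taking every positive element, or the largest element when all are negative. A direct formulation that makes the equivalence visible:

# Direct formulation equivalent to the recurrence above for *subsequences*.
def max_subsequence_sum_direct(nums: list) -> int:
    positives = [x for x in nums if x > 0]
    return sum(positives) if positives else max(nums)

for case in ([1, 2, 3, 4, -2], [-2, -3, -1, -4, -6], [5, -10, 3]):
    print(case, "->", max_subsequence_sum_direct(case))  # 10, -1, 8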
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two values in sorted `nums` that sum to `target`."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306 | 0 |
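The two-pointer scan relies on `nums` being sorted. When the input is unsorted, the usual alternative is a one-pass hash map, still O(n) time at the cost of O(n) extra space; a sketch:

# Hash-map variant of two-sum for unsorted input, one pass, O(n) time.
def two_sum_hashmap(nums: list, target: int) -> list:
    seen: dict = {}
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []

print(two_sum_hashmap([11, 2, 15, 7], 9))  # [1, 3]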
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase : Optional[int] = TypeVar("T")
UpperCAmelCase : Tuple = Union[List[T], Tuple[T, ...]]
UpperCAmelCase : List[Any] = Union[T, List[T], Dict[str, T]]
UpperCAmelCase : Any = Union[str, bytes, os.PathLike]
| 354 |
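A small usage sketch for these aliases: a function annotated with `NestedDataStructureLike` that accepts a bare value, a list, or a dict of `T` and flattens all three (the `flatten` helper is illustrative, not part of the library):

# Usage sketch for the aliases above: accept a value, list, or dict of T.
def flatten(nested: NestedDataStructureLike[T]) -> List[T]:
    if isinstance(nested, dict):
        return list(nested.values())
    if isinstance(nested, (list, tuple)):
        return list(nested)
    return [nested]

print(flatten({"a": 1, "b": 2}), flatten((3, 4)), flatten(5))  # [1, 2] [3, 4] [5]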
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Tuple=9_9 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : int=5_1_2 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : List[Any]=0.02 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : Dict=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = embedding_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
# test_resize_embeddings = False
lowercase__ = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    # torch_device is the standard helper from transformers.testing_utils
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip("""Model is not available.""")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 313 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Generate all permutations of arr with Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
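# Quick illustrative self-check of the function above: Heap's algorithm emits all
# n! permutations, each differing from its predecessor by a single swap.
assert sorted(heaps([1, 2, 3])) == sorted(
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
)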
if __name__ == "__main__":
_A : str =input('''Enter numbers separated by a comma:\n''').strip()
_A : Optional[int] =[int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 41 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
    def encode(self, x, return_dict: bool = True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z, return_dict: bool = True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
@apply_forward_hook
    def decode(self, z, return_dict: bool = True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x, return_dict: bool = True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z, return_dict: bool = True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample, sample_posterior: bool = False, return_dict: bool = True, generator=None):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
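# Illustrative sketch (not part of the class above): the linear cross-fade that
# blend_v/blend_h apply across the tile overlap, reduced to one dimension with NumPy.
# The weight ramps from 0 to 1 over the overlap, so each tile fades into its neighbour;
# blend_1d is a made-up helper name used only for this demonstration.
import numpy as np


def blend_1d(a, b):
    w = np.arange(len(b)) / len(b)  # weights 0, 1/n, ..., (n-1)/n
    return a[-len(b):] * (1 - w) + b * w


print(blend_1d(np.ones(4), np.zeros(4)))  # [1.0, 0.75, 0.5, 0.25]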
| 330 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase: Tuple = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
@property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' ) | 96 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
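    # Note on the toy BPE files above (summary derived from the merge table): each merge
    # fuses one symbol pair -- "a p" -> "ap", "ap t</w>" -> "apt</w>", "a d" -> "ad",
    # "ad apt</w>" -> "adapt</w>" -- so the word-final "adapt" and "apt" collapse to
    # single tokens, while "react" only gets "r e" -> "re" and falls back to
    # "@@"-continued characters: re@@ a@@ c@@ t.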
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 96 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all positions a knight can move to from the current position on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper for the knight's tour problem."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight's tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
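# Illustrative behaviour: open_knight_tour(1) == [[1]]; open_knight_tour(5) returns a
# 5x5 board whose entries 1..25 trace one valid open tour; sizes with no tour,
# such as n == 2, raise ValueError.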
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
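# For example, mapping an ONNX Runtime output type string back to a NumPy dtype:
assert ORT_TO_NP_TYPE["tensor(float)"] is np.float32
assert ORT_TO_NP_TYPE["tensor(int64)"] is np.int64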
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__UpperCAmelCase : str = self.model_save_dir.joinpath(self.latest_model_name )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__UpperCAmelCase : str = self.model_save_dir.joinpath(__lowerCamelCase )
if src_path.exists():
__UpperCAmelCase : List[str] = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
if os.path.isfile(__lowerCamelCase ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
# saving model weights/files
self._save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowerCamelCase ):
__UpperCAmelCase : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(__lowerCamelCase , __lowerCamelCase ) , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = Path(__lowerCamelCase )
# load model from hub
else:
# download model
__UpperCAmelCase : Optional[Any] = hf_hub_download(
repo_id=__lowerCamelCase , filename=__lowerCamelCase , use_auth_token=__lowerCamelCase , revision=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).parent
__UpperCAmelCase : List[Any] = Path(__lowerCamelCase ).name
__UpperCAmelCase : Dict = OnnxRuntimeModel.load_model(__lowerCamelCase , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
return cls(model=__lowerCamelCase , **__lowerCamelCase )
@classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 157 | 1 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, left, right, key):
    """Binary search: smallest index in v[left+1 .. right] whose value is >= key."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
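# Equivalent O(n log n) sketch using the standard library's bisect module
# (an illustrative cross-check, not part of the original snippet; lis_length
# is a made-up name for this demo):
from bisect import bisect_left


def lis_length(v: list) -> int:
    tails: list = []
    for x in v:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)


assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6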
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: v = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
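# Example (typical approximate values for water): density ~998 kg/m^3 and bulk
# modulus ~2.15e9 Pa give speed_of_sound_in_a_fluid(998, 2.15e9) ~ 1467.8 m/s.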
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self : Optional[Any]):
"""simple docstring"""
return self.config.num_train_timesteps
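# Note on the update above (summary, not original source text): under the forward VP-SDE,
# x_t ~ N(exp(log_mean_coeff) * x_0, (1 - exp(2 * log_mean_coeff)) * I), so `std` is the
# marginal standard deviation and dividing the model output by it rescales a noise
# prediction into a score. step_pred then performs one Euler-Maruyama step of the
# reverse-time SDE dx = [-1/2 beta_t x - beta_t * score] dt + sqrt(beta_t) dw with dt < 0.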
| 51 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two prime factors (semiprimes)."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
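# Worked example: solution(30) == 10; the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.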
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Apply |Z|**2 = R**2 + X**2 to compute whichever of (R, X, |Z|) is given as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
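# Example, using a 3-4-5 triple: electrical_impedance(3, 4, 0) == {"impedance": 5.0}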
if __name__ == "__main__":
import doctest
doctest.testmod() | 356 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature-extractor-specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''padding_value''' ) )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_UpperCAmelCase )
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
snake_case_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, numpify=False):
def _inputs_have_equal_length(_UpperCAmelCase ):
snake_case_ = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ):
return False
return True
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = self.feat_extract_tester.seq_length_diff
snake_case_ = self.feat_extract_tester.max_seq_length + pad_diff
snake_case_ = self.feat_extract_tester.min_seq_length
snake_case_ = self.feat_extract_tester.batch_size
snake_case_ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
snake_case_ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''max_length''' )[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
snake_case_ = feat_extract.pad(_UpperCAmelCase , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , pad_to_multiple_of=10 )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_UpperCAmelCase , return_tensors='''np''' , )
snake_case_ = input_a[input_name]
self.assertTrue(all(len(_UpperCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
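# i.e. round pad_max_length up to the next multiple of 10 (the pad_to_multiple_of value)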
self.assertTrue(all(len(_UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
snake_case_ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
def _inputs_have_equal_length(_UpperCAmelCase ):
snake_case_ = len(input[0] )
for input_slice in input[1:]:
if len(_UpperCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
if not np.allclose(np.asarray(_UpperCAmelCase ) , np.asarray(_UpperCAmelCase ) , atol=1E-3 ):
return False
return True
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=_UpperCAmelCase )
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
snake_case_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to smallest with np
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
# truncate to middle
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase , return_tensors='''np''' , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_UpperCAmelCase )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
snake_case_ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(_inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''longest''' , truncation=_UpperCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''longest''' , truncation=_UpperCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_UpperCAmelCase ):
feat_extract.pad(_UpperCAmelCase , padding='''max_length''' , truncation=_UpperCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
snake_case_ = 12
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_UpperCAmelCase , )
snake_case_ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
snake_case_ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
snake_case_ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_UpperCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_UpperCAmelCase ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def UpperCamelCase__ ( self ):
snake_case_ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCAmelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = [len(_UpperCAmelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.feat_extract_dict
snake_case_ = True
snake_case_ = self.feature_extraction_class(**_UpperCAmelCase )
snake_case_ = self.feat_extract_tester.prepare_inputs_for_common()
snake_case_ = [len(_UpperCAmelCase ) for x in speech_inputs]
snake_case_ = feat_extract.model_input_names[0]
snake_case_ = BatchFeature({input_name: speech_inputs} )
snake_case_ = min(_UpperCAmelCase )
snake_case_ = feat_extract.pad(
_UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] ) | 267 | 0 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A univariate polynomial, coefficients stored lowest degree first."""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant_of_integration: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
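# Hedged usage sketch for the Polynomial class above. Coefficients are stored
# lowest degree first, so [1, 2, 3] represents 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    assert p.evaluate(2) == 17                        # 3*4 + 2*2 + 1
    assert str(p.derivative()) == "6x + 2"            # d/dx (3x^2 + 2x + 1)
    assert (p + Polynomial(0, [4])).evaluate(2) == 21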
| 30 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
A: str = XLMTokenizer
A: Optional[Any] = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCamelCase__ : Optional[int] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
UpperCamelCase__ : Optional[Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : int = '''lower newer'''
UpperCamelCase__ : List[str] = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCamelCase__ : Tuple = '''lower'''
UpperCamelCase__ : Dict = ['''low''', '''er</w>''']
UpperCamelCase__ : Optional[int] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = tokens + ['''<unk>''']
UpperCamelCase__ : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Any = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
UpperCamelCase__ : List[str] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
UpperCamelCase__ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
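# Minimal BPE sketch (an illustrative re-implementation, not the transformers
# code) showing why the test above expects "lower" -> ["low", "er</w>"]: adjacent
# symbol pairs are merged greedily in the priority order of the merges file, and
# the tiny vocabulary above has no merge for ("low", "er</w>").
def bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while True:
        # collect every adjacent pair present in the merge table, with its rank
        candidates = [
            (merges.index(pair), i)
            for i, pair in enumerate(zip(symbols, symbols[1:]))
            if pair in merges
        ]
        if not candidates:
            return symbols
        _, i = min(candidates)  # lowest rank = highest priority
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2:]

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # mirrors the merges file above
assert bpe("lower", merges) == ["low", "er</w>"]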
| 146 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class a ( UpperCAmelCase ):
_lowercase = "conditional_detr"
_lowercase = ["past_key_values"]
_lowercase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , A_=True , A_=None , A_=3 , A_=300 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=2048 , A_=8 , A_=0.0 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=2 , A_=5 , A_=2 , A_=1 , A_=1 , A_=2 , A_=5 , A_=2 , A_=0.25 , **A_ , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A_ , A_ ):
_UpperCAmelCase : Tuple = backbone_config.get("model_type" )
_UpperCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : Tuple = config_class.from_dict(A_ )
_UpperCAmelCase : Tuple = use_timm_backbone
_UpperCAmelCase : str = backbone_config
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : Any = encoder_ffn_dim
_UpperCAmelCase : Tuple = encoder_layers
_UpperCAmelCase : Tuple = encoder_attention_heads
_UpperCAmelCase : str = decoder_ffn_dim
_UpperCAmelCase : Optional[Any] = decoder_layers
_UpperCAmelCase : str = decoder_attention_heads
_UpperCAmelCase : Optional[int] = dropout
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : List[str] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Dict = init_xavier_std
_UpperCAmelCase : Union[str, Any] = encoder_layerdrop
_UpperCAmelCase : str = decoder_layerdrop
_UpperCAmelCase : Optional[Any] = encoder_layers
_UpperCAmelCase : Optional[int] = auxiliary_loss
_UpperCAmelCase : List[Any] = position_embedding_type
_UpperCAmelCase : Dict = backbone
_UpperCAmelCase : Optional[Any] = use_pretrained_backbone
_UpperCAmelCase : List[str] = dilation
# Hungarian matcher
_UpperCAmelCase : int = class_cost
_UpperCAmelCase : Optional[Any] = bbox_cost
_UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
_UpperCAmelCase : Any = mask_loss_coefficient
_UpperCAmelCase : List[str] = dice_loss_coefficient
_UpperCAmelCase : Tuple = cls_loss_coefficient
_UpperCAmelCase : Any = bbox_loss_coefficient
_UpperCAmelCase : int = giou_loss_coefficient
_UpperCAmelCase : str = focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.d_model
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_UpperCAmelCase : Optional[int] = self.backbone_config.to_dict()
_UpperCAmelCase : str = self.__class__.model_type
return output
class a ( UpperCAmelCase ):
_lowercase = version.parse("1.11" )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return 1e-5
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return 12
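# Minimal sketch of the attribute_map aliasing declared at the top of the config
# above: attribute reads are redirected through the map, so num_attention_heads
# resolves to encoder_attention_heads. (Simplified illustration with a
# hypothetical class; the real logic, including write redirection, lives in
# PretrainedConfig.)
class AliasedConfig:
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for aliased names.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

config = AliasedConfig()
assert config.hidden_size == 256
assert config.num_attention_heads == 8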
| 356 |
from __future__ import annotations
class IIRFilter:
    """An N-order direct-form I IIR filter."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )

        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
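# Hedged usage sketch for IIRFilter above: with b = [0.5, 0.5] and a = [1.0, 0.0],
# the first-order filter reduces to a two-tap moving average of the input.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.5])
    assert [filt.process(s) for s in [1.0, 1.0, 1.0]] == [0.5, 1.0, 1.0]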
| 189 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
SCREAMING_SNAKE_CASE = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
SCREAMING_SNAKE_CASE = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase_ ( A_ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RoFormerTokenizer
def __init__( self : Optional[int] , snake_case_ : str=None , snake_case_ : Optional[int]=None , snake_case_ : int=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Tuple="[PAD]" , snake_case_ : Tuple="[CLS]" , snake_case_ : Tuple="[MASK]" , snake_case_ : Union[str, Any]=True , snake_case_ : Union[str, Any]=None , **snake_case_ : int , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
A__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , snake_case_ ) != do_lower_case
or pre_tok_state.get("strip_accents" , snake_case_ ) != strip_accents
):
A__ = getattr(snake_case_ , pre_tok_state.pop("type" ) )
A__ = do_lower_case
A__ = strip_accents
A__ = pre_tok_class(**snake_case_ )
A__ = do_lower_case
def __getstate__( self : Tuple ) -> Any:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = BertPreTokenizer()
return state
def __setstate__( self : int , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = d
A__ = self.__dict__["_tokenizer"].get_vocab()
A__ = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
A__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ ( self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__ ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
A__ = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __magic_name__ ( self : int , snake_case_ : int , snake_case_ : Any=None , snake_case_ : Tuple=None , snake_case_ : List[Any]=False , **snake_case_ : List[Any] , ) -> List[Any]:
'''simple docstring'''
A__ = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
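# Minimal sketch of the create_token_type_ids_from_sequences logic above: segment
# id 0 covers [CLS] + first sequence + [SEP], segment id 1 covers the second
# sequence + its trailing [SEP]. (Illustrative helper with a hypothetical name.)
def token_type_ids(len_a, len_b=None):
    first = 1 + len_a + 1  # [CLS] ... [SEP]
    if len_b is None:
        return [0] * first
    return [0] * first + [1] * (len_b + 1)

assert token_type_ids(3) == [0, 0, 0, 0, 0]
assert token_type_ids(2, 2) == [0, 0, 0, 0, 1, 1, 1]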
| 247 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return (base ** exponent) % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the power tower base ↑↑ height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'{solution() = }')
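    # Sanity sketch: _modexpt implements exponentiation by repeated squaring, so
    # it should agree with Python's built-in three-argument pow on small inputs;
    # the driver above then iterates the tower keeping only the last `digits` digits.
    assert all(
        _modexpt(b, e, m) == pow(b, e, m)
        for b in range(1, 6)
        for e in range(1, 6)
        for m in (7, 97, 10**4)
    )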
| 247 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = tempfile.mkdtemp()
# fmt: off
__A : str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : List[str] = {'unk_token': '<unk>'}
__A : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_UpperCAmelCase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(_UpperCAmelCase))
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
__A : Optional[Any] = os.path.join(self.tmpdirname , _UpperCAmelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__A : str = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[int] = self.get_image_processor()
__A : str = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor_slow.save_pretrained(self.tmpdirname)
__A : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase)
__A : int = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor_fast.save_pretrained(self.tmpdirname)
__A : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase)
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase)
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__A : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : Optional[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : str = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : List[Any] = self.get_tokenizer()
__A : int = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = self.prepare_image_inputs()
__A : str = image_processor(_UpperCAmelCase , return_tensors='np')
__A : str = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Any = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Dict = 'lower newer'
__A : Union[str, Any] = processor(text=_UpperCAmelCase)
__A : Optional[Any] = tokenizer(_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Any = self.get_tokenizer()
__A : Tuple = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Optional[Any] = 'lower newer'
__A : Optional[int] = self.prepare_image_inputs()
__A : Dict = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Tuple = processor.batch_decode(_UpperCAmelCase)
__A : Optional[int] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : List[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = 'lower newer'
__A : Optional[Any] = self.prepare_image_inputs()
__A : Dict = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names) | 360 |
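# Self-contained sketch of the prepare_image_inputs fixture above: random uint8
# arrays are built channels-first (C, H, W) and moved to channels-last for PIL.
import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)  # C, H, W
img = Image.fromarray(np.moveaxis(arr, 0, -1))                   # H, W, C
assert img.size == (400, 30)  # PIL reports (width, height)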
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowerCAmelCase ( ) -> Union[str, Any]:
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=__snake_case , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=__snake_case , default=5 )
parser.add_argument('--batch_size' , type=__snake_case , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=__snake_case , default=1 )
parser.add_argument('--freeze' , type=__snake_case , default=__snake_case )
parser.add_argument('--learning_rate' , type=__snake_case , default=5e-4 )
parser.add_argument('--seed' , type=__snake_case , default=0 )
parser.add_argument('--lr_scheduler_type' , type=__snake_case , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=__snake_case , default=10 )
parser.add_argument('--weight_decay' , type=__snake_case , default=0.01 )
parser.add_argument('--output_dir' , type=__snake_case , default='./results' )
return parser.parse_args()
lowercase__ : Tuple = load('''accuracy''')
def _lowerCAmelCase ( __snake_case : int ) -> Any:
__A ,__A : List[Any] = eval_pred
__A : Dict = np.argmax(__snake_case , axis=1 )
return metric.compute(predictions=__snake_case , references=__snake_case )
class SCREAMING_SNAKE_CASE (a__ ):
def __init__( self , _UpperCAmelCase):
'''simple docstring'''
super().__init__()
__A : int = trainer
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
if control.should_evaluate:
__A : str = deepcopy(_UpperCAmelCase)
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train')
return control_copy
def _lowerCAmelCase ( ) -> str:
__A : List[Any] = get_args()
set_seed(args.seed )
__A : Union[str, Any] = load_dataset('codeparrot/codecomplex' , split='train' )
__A : Optional[int] = dataset.train_test_split(test_size=0.2 )
__A : Union[str, Any] = train_test['test'].train_test_split(test_size=0.5 )
__A : Optional[Any] = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
__A : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
__A : Tuple = tokenizer.eos_token
__A : Dict = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__A : Optional[int] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__A : Optional[Any] = False
__A : Dict = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(__snake_case : Optional[Any] ):
__A : Optional[Any] = tokenizer(example['src'] , truncation=__snake_case , max_length=10_24 )
        __A : str = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__A : str = train_test_validation.map(
__snake_case , batched=__snake_case , remove_columns=train_test_validation['train'].column_names , )
__A : str = DataCollatorWithPadding(tokenizer=__snake_case )
__A : str = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
__A : Tuple = Trainer(
model=__snake_case , args=__snake_case , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
print('Training...' )
trainer.add_callback(CustomCallback(__snake_case ) )
trainer.train()
if __name__ == "__main__":
main() | 190 | 0 |
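# Minimal sketch of the ClassLabel mapping used in tokenize() above: str2int maps
# a complexity label string to the integer id the classification head is trained
# on (the label names here are illustrative, not the codecomplex ones).
from datasets import ClassLabel

labels = ClassLabel(names=["constant", "linear", "quadratic"])
assert labels.str2int("linear") == 1
assert labels.int2str(2) == "quadratic"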
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase = """pt"""
elif is_tf_available():
_UpperCAmelCase = """tf"""
else:
_UpperCAmelCase = """jax"""
class UpperCAmelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = ByTaTokenizer
lowerCamelCase_ = False
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def lowerCAmelCase_ ( self , **lowercase ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase=False , lowercase=2_0 , lowercase=5 ):
"""simple docstring"""
A_ : List[str] = []
for i in range(len(lowercase ) ):
try:
A_ : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A_ : str = list(filter(lambda lowercase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , lowercase ) )
A_ : Optional[int] = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
A_ : str = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
A_ : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
A_ : List[str] = [t[0] for t in toks]
# Ensure consistency
A_ : Optional[Any] = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
A_ : Union[str, Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
A_ : Dict = ' ' + output_txt
A_ : List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.ta_base_tokenizer
A_ : List[str] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
A_ : Dict = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.ta_base_tokenizer
A_ : str = 'Unicode €.'
A_ : Optional[int] = tokenizer(lowercase )
A_ : int = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
A_ : List[Any] = tokenizer.decode(lowercase )
self.assertEqual(lowercase , 'Unicode €.</s>' )
A_ : Dict = tokenizer('e è é ê ë' )
A_ : str = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
A_ : Optional[int] = tokenizer.decode(lowercase )
self.assertEqual(lowercase , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.ta_base_tokenizer
A_ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
A_ : str = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
A_ : Optional[int] = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
A_ : Dict = list(batch.input_ids.numpy()[0] )
else:
A_ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.ta_base_tokenizer
A_ : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A_ : Any = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.ta_base_tokenizer
A_ : str = [
'Summary of the text.',
'Another summary.',
]
A_ : Dict = tokenizer(
text_target=lowercase , max_length=3_2 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.ta_base_tokenizer
A_ : Optional[Any] = ['A long paragraph for summarization. </s>']
A_ : str = ['Summary of the text. </s>']
# fmt: off
A_ : str = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
A_ : List[Any] = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
A_ : Union[str, Any] = tokenizer(lowercase , text_target=lowercase )
self.assertEqual(lowercase , batch['input_ids'][0] )
self.assertEqual(lowercase , batch['labels'][0] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
A_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ : List[Any] = tempfile.mkdtemp()
A_ : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
A_ : List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A_ : str = tokenizer.__class__.from_pretrained(lowercase )
A_ : int = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
A_ : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ : Optional[int] = tempfile.mkdtemp()
A_ : List[str] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
A_ : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A_ : Tuple = tokenizer.__class__.from_pretrained(lowercase )
A_ : List[Any] = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
A_ : List[Any] = tokenizer.__class__.from_pretrained(lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
A_ : List[Any] = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
A_ : Dict = json.load(lowercase )
A_ : Optional[Any] = [F'''<extra_id_{i}>''' for i in range(1_2_5 )]
A_ : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
A_ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A_ : Optional[int] = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A_ : Union[str, Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
A_ : Optional[int] = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
A_ : Tuple = tokenizer_class.from_pretrained(lowercase )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A_ : Optional[int] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
A_ : List[Any] = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A_ : Optional[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
A_ : List[str] = 0
A_ : Dict = tokenizer.convert_ids_to_tokens(
lowercase , skip_special_tokens=lowercase )
for attr in attributes_list:
setattr(lowercase , attr + '_id' , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase )
setattr(lowercase , attr + '_id' , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + '_id' ) , lowercase )
setattr(lowercase , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [] )
setattr(lowercase , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowercase , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
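    # ByT5 tokenizes raw UTF-8 bytes: token id = byte value + 3, since ids 0-2 are
    # reserved for the pad/</s>/unk specials. Minimal sketch of the id scheme
    # behind the "Unicode €." expectation asserted above.
    text = "Unicode €."
    ids = [b + 3 for b in text.encode("utf-8")] + [1]  # append </s> (id 1)
    assert ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]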
| 140 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 140 | 1 |
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of a DAG, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
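# Kahn's algorithm doubles as a cycle detector: in the cycle 0 -> 1 -> 2 -> 0 no
# vertex ever reaches indegree zero, so cnt stays below len(graph).
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"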
| 93 |
def naive_pattern_search(s, pattern):
    """Return every index at which `pattern` occurs in `s` (naive sliding-window scan)."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
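    # The naive scan is O(len(s) * len(pattern)) in the worst case: inputs where
    # every window almost matches force the inner loop to run nearly to completion.
    assert naive_pattern_search("a" * 10 + "b", "aaab") == [7]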
| 93 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def _snake_case ( _snake_case : Union[str, Any] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def _snake_case ( _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowerCAmelCase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
lowerCAmelCase : List[Any] = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
lowerCAmelCase : List[Any] = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
lowerCAmelCase : List[str] = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
lowerCAmelCase : Optional[Any] = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
lowerCAmelCase : Tuple = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
lowerCAmelCase : int = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
lowerCAmelCase : List[Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
lowerCAmelCase : List[str] = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
lowerCAmelCase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' )
lowerCAmelCase : Optional[int] = key.replace('''text_encoder.module''' , '''flava.text_model''' )
lowerCAmelCase : Optional[int] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
lowerCAmelCase : Tuple = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
lowerCAmelCase : Tuple = key.replace('''text_projection''' , '''flava.text_projection''' )
lowerCAmelCase : Dict = key.replace('''image_projection''' , '''flava.image_projection''' )
lowerCAmelCase : Union[str, Any] = value.float()
for key, value in codebook_state_dict.items():
lowerCAmelCase : Dict = value
return upgrade
@torch.no_grad()
def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : int=None ):
if config_path is not None:
lowerCAmelCase : Optional[int] = FlavaConfig.from_pretrained(_snake_case )
else:
lowerCAmelCase : Optional[Any] = FlavaConfig()
lowerCAmelCase : List[Any] = FlavaForPreTraining(_snake_case ).eval()
lowerCAmelCase : int = convert_dalle_checkpoint(_snake_case , _snake_case , save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
lowerCAmelCase : Tuple = torch.load(_snake_case , map_location='''cpu''' )
else:
lowerCAmelCase : str = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' )
lowerCAmelCase : str = upgrade_state_dict(_snake_case , _snake_case )
hf_model.load_state_dict(_snake_case )
lowerCAmelCase : Optional[int] = hf_model.state_dict()
lowerCAmelCase : Any = count_parameters(_snake_case )
lowerCAmelCase : List[Any] = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case , _snake_case , atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
snake_case__ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
snake_case__ : Any = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 60 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a__ :
_SCREAMING_SNAKE_CASE : Tuple = PegasusConfig
_SCREAMING_SNAKE_CASE : int = {}
_SCREAMING_SNAKE_CASE : Optional[int] = 'gelu'
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=20 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=0 , ):
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[str] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : int = use_labels
_lowercase : Optional[int] = vocab_size
_lowercase : str = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = eos_token_id
_lowercase : Optional[int] = pad_token_id
_lowercase : Optional[Any] = bos_token_id
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowercase : Tuple = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase : int = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase : Optional[int] = prepare_pegasus_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[Any] = 20
_lowercase : int = model_class_name(_UpperCamelCase )
_lowercase : str = model.encode(inputs_dict["input_ids"] )
_lowercase , _lowercase : List[str] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowercase : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
_lowercase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_lowercase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowercase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_lowercase : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
_lowercase : Any = model.decode(_UpperCamelCase , _UpperCamelCase )
_lowercase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[str] = 20
_lowercase : List[Any] = model_class_name(_UpperCamelCase )
_lowercase : Dict = model.encode(inputs_dict["input_ids"] )
_lowercase , _lowercase : int = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowercase : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : int = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
_lowercase : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowercase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_lowercase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
_lowercase : Dict = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
_lowercase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _A ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , ) -> int:
if attention_mask is None:
_lowercase : int = np.not_equal(snake_case , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_lowercase : Dict = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_SCREAMING_SNAKE_CASE : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = FlaxPegasusModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Optional[int] = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
_lowercase : Union[str, Any] = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest("JIT Enabled" ):
_lowercase : Optional[Any] = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Optional[int] = model_class(_UpperCamelCase )
_lowercase : Tuple = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_lowercase : List[Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest("JIT Enabled" ):
_lowercase : List[Any] = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowercase : Optional[Any] = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase : List[Any] = model_class_name.from_pretrained("google/pegasus-large" , from_pt=_UpperCamelCase )
_lowercase : Optional[int] = np.ones((1, 1) )
_lowercase : int = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_lowercase : str = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_lowercase : str = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 250 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
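# The manual last-batch truncation in `training_function` is exactly what
# `Accelerator.gather_for_metrics` automates (assuming a recent accelerate
# release that ships this helper). A sketch of the equivalent eval loop:
#
#     for batch in eval_dataloader:
#         with torch.no_grad():
#             outputs = model(**batch)
#         predictions = outputs.logits.argmax(dim=-1)
#         # drops the duplicated samples of the last distributed batch automatically
#         predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#         metric.add_batch(predictions=predictions, references=references)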
| 346 |
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
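# A slightly more robust variant (sketch) that lets `requests` handle URL
# encoding and adds a timeout; `get_gifs_via_params` is a hypothetical name:
#
#     def get_gifs_via_params(query: str, api_key: str = giphy_api_key) -> list:
#         response = requests.get(
#             "https://api.giphy.com/v1/gifs/search",
#             params={"q": query, "api_key": api_key},
#             timeout=10,
#         )
#         return [gif["url"] for gif in response.json()["data"]]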
| 346 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding control characters that BPE merge files choke on."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
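# Illustrative: bytes outside the "printable" ranges above are remapped to
# characters >= chr(256); e.g. the space byte becomes "Ġ":
#     bytes_to_unicode()[ord(" ")] == "Ġ"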
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
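# For example: get_pairs(("h", "e", "l", "l", "o"))
#     == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}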
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
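# Hedged usage sketch (assumes network access to the Hugging Face Hub):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer(" Hello world")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))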
| 92 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
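# Minimal instantiation sketch (shape choices illustrative):
#
#     import jax
#     unet = FlaxUNet2DConditionModel(sample_size=32)
#     params = unet.init_weights(jax.random.PRNGKey(0))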
| 313 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
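# Hypothetical invocations of this script (script name, paths and flags illustrative):
#
#     python checkpointing.py --num_epochs 3 --output_dir ./ckpts
#     python checkpointing.py --resume_from_checkpoint ./ckpts/epoch_1 --output_dir ./ckpts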
| 351 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 276 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowercase__ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
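# Hedged usage sketch (checkpoint name illustrative; requires Hub access):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")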
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and rejoin them with newlines (as expected by rougeLsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
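# Illustrative call (requires the nltk "punkt" model downloaded above):
#     add_newline_to_end_of_each_sentence("First sentence. Second one.")
#     -> "First sentence.\nSecond one."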
| 362 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__ = 4.0 , snake_case__ = None , snake_case__ = 50 , snake_case__ = "pil" , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : List[Any]= len(snake_case__ )
lowercase__ : Optional[int]= self.transformer.config.sample_size
lowercase__ : List[str]= self.transformer.config.in_channels
lowercase__ : Any= randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , )
lowercase__ : Any= torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ : Tuple= torch.tensor(snake_case__ , device=self.device ).reshape(-1 )
lowercase__ : Any= torch.tensor([1000] * batch_size , device=self.device )
lowercase__ : Tuple= torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ : List[str]= latent_model_input[: len(snake_case__ ) // 2]
lowercase__ : int= torch.cat([half, half] , dim=0 )
lowercase__ : Union[str, Any]= self.scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : Optional[int]= t
if not torch.is_tensor(snake_case__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ : List[str]= latent_model_input.device.type == "mps"
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : int= torch.floataa if is_mps else torch.floataa
else:
lowercase__ : Dict= torch.intaa if is_mps else torch.intaa
lowercase__ : Tuple= torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ : Dict= timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int= timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ : Union[str, Any]= self.transformer(
snake_case__ , timestep=snake_case__ , class_labels=snake_case__ ).sample
# perform guidance
if guidance_scale > 1:
lowercase__, lowercase__ : Tuple= noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__, lowercase__ : Union[str, Any]= torch.split(snake_case__ , len(snake_case__ ) // 2 , dim=0 )
lowercase__ : str= uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ : Dict= torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ : Optional[int]= torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__, lowercase__ : Union[str, Any]= torch.split(snake_case__ , snake_case__ , dim=1 )
else:
lowercase__ : int= noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ : List[Any]= self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
if guidance_scale > 1:
lowercase__, lowercase__ : Any= latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ : str= latent_model_input
lowercase__ : Dict= 1 / self.vae.config.scaling_factor * latents
lowercase__ : Any= self.vae.decode(snake_case__ ).sample
lowercase__ : Tuple= (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ : List[Any]= samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : Optional[Any]= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case__ )
| 150 | 0 |
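# The `__call__` above implements classifier-free guidance: latents and class labels are
# batched as [conditional, unconditional] and the two noise estimates are blended.
# A minimal standalone sketch of just that blend (hypothetical tensors, not the pipeline API):
import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # First half of the batch is conditional, second half unconditional,
    # mirroring torch.cat([latents] * 2) and torch.cat([class_labels, class_null]) above.
    cond_eps, uncond_eps = torch.chunk(noise_pred, 2, dim=0)
    half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    return torch.cat([half_eps, half_eps], dim=0)

guided = apply_cfg(torch.randn(4, 4, 32, 32), guidance_scale=4.0)
assert guided.shape == (4, 4, 32, 32)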
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : int ):
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(A )
_UpperCAmelCase : Any = -1
_UpperCAmelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : str = model.generate(A , max_new_tokens=10 , do_sample=A )
_UpperCAmelCase : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : str = TextStreamer(A )
model.generate(A , max_new_tokens=10 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : int = cs.out[:-1]
self.assertEqual(A , A )
def _A ( self : Any ):
_UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(A )
_UpperCAmelCase : List[str] = -1
_UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : Tuple = model.generate(A , max_new_tokens=10 , do_sample=A )
_UpperCAmelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A )
_UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCAmelCase : Dict = Thread(target=model.generate , kwargs=A )
thread.start()
_UpperCAmelCase : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A , A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(A )
_UpperCAmelCase : Dict = -1
_UpperCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : str = model.generate(A , max_new_tokens=10 , do_sample=A )
_UpperCAmelCase : List[Any] = greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase : int = TextStreamer(A , skip_prompt=A )
model.generate(A , max_new_tokens=10 , do_sample=A , streamer=A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase : Optional[Any] = cs.out[:-1]
self.assertEqual(A , A )
def _A ( self : Dict ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase : int = AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(A )
_UpperCAmelCase : Optional[int] = -1
_UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase : int = TextStreamer(A , skip_special_tokens=A )
model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase : str = cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase : int = tokenizer(A , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(A )
_UpperCAmelCase : str = -1
_UpperCAmelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A )
_UpperCAmelCase : List[str] = TextIteratorStreamer(A , timeout=0.001 )
_UpperCAmelCase : List[str] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A ):
_UpperCAmelCase : Tuple = ""
for new_text in streamer:
streamer_text += new_text
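# The timeout test above works because TextIteratorStreamer is a blocking iterator fed from a
# background generation thread. A minimal consumer sketch reusing the same tiny checkpoint as
# the tests (the prompt string is an arbitrary assumption):
def _streamer_usage_sketch():
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    streamer = TextIteratorStreamer(tokenizer)
    inputs = tokenizer("Hello", return_tensors="pt")
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    return "".join(streamer)  # blocks until the generation thread finishes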
| 31 | '''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase_ (snake_case__ , snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = 1
@register_to_config
def __init__( self : Optional[int] , A : int = 1000 , A : Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(A )
# standard deviation of the initial noise distribution
_UpperCAmelCase : int = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
_UpperCAmelCase : int = 4
# running values
_UpperCAmelCase : Dict = []
def _A ( self : Optional[int] , A : int , A : Union[str, torch.device] = None ):
_UpperCAmelCase : int = num_inference_steps
_UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
_UpperCAmelCase : Any = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
_UpperCAmelCase : str = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
_UpperCAmelCase : Dict = torch.sin(steps * math.pi / 2 ) ** 2
_UpperCAmelCase : List[Any] = (1.0 - self.betas**2) ** 0.5
_UpperCAmelCase : List[str] = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
_UpperCAmelCase : Dict = timesteps.to(A )
_UpperCAmelCase : Dict = []
def _A ( self : Optional[int] , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
_UpperCAmelCase : Tuple = (self.timesteps == timestep).nonzero().item()
_UpperCAmelCase : Optional[Any] = timestep_index + 1
_UpperCAmelCase : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(A )
if len(self.ets ) == 1:
_UpperCAmelCase : List[Any] = self.ets[-1]
elif len(self.ets ) == 2:
_UpperCAmelCase : str = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
_UpperCAmelCase : Tuple = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
_UpperCAmelCase : Union[str, Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
_UpperCAmelCase : Union[str, Any] = self._get_prev_sample(A , A , A , A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def _A ( self : Union[str, Any] , A : torch.FloatTensor , *A : Union[str, Any] , **A : Dict ):
return sample
def _A ( self : Optional[Any] , A : Optional[int] , A : int , A : Optional[Any] , A : List[str] ):
_UpperCAmelCase : List[str] = self.alphas[timestep_index]
_UpperCAmelCase : List[Any] = self.betas[timestep_index]
_UpperCAmelCase : Optional[Any] = self.alphas[prev_timestep_index]
_UpperCAmelCase : Dict = self.betas[prev_timestep_index]
_UpperCAmelCase : Tuple = (sample - sigma * ets) / max(A , 1E-8 )
_UpperCAmelCase : List[str] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Union[str, Any] ):
return self.config.num_train_timesteps
| 31 | 1 |
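# The `step` above is a linear multistep (Adams-Bashforth style) update over the last four
# model outputs. A standalone sketch of just that blending rule, checked by hand:
import torch

def combine_ets(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

# With outputs [1, 2] the two-step rule gives (3 * 2 - 1) / 2 = 2.5:
assert torch.allclose(combine_ets([torch.ones(2), 2 * torch.ones(2)]), torch.full((2,), 2.5))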
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = num_of_nodes
lowercase = []
lowercase = {}
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase = self.find_component(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
lowercase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
lowercase = self.find_component(__lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = []
lowercase = 0
lowercase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase , lowercase , lowercase = edge
lowercase = self.m_component[u]
lowercase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase , lowercase , lowercase = edge
lowercase = self.m_component[u]
lowercase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
lowercase = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def UpperCAmelCase__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
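# For reference, a compact union-find variant of the same Boruvka procedure (a sketch, not
# the class above; assumes a connected graph given as (u, v, weight) triples):
def boruvka_mst_weight(n, edges):
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x

    total, components = 0, n
    while components > 1:
        cheapest = [None] * n  # cheapest outgoing edge per component root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if cheapest[r] is None or cheapest[r][2] > w:
                        cheapest[r] = (u, v, w)
        for u, v, w in set(e for e in cheapest if e):
            ru, rv = find(u), find(v)
            if ru != rv:
                parent[ru] = rv
                total += w
                components -= 1
    return total

assert boruvka_mst_weight(3, [(0, 1, 5), (1, 2, 1), (0, 2, 3)]) == 4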
| 32 | """simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32 | 1 |
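# The argument mangling above obscures the indices; the intended behaviour is "largest value
# obtainable by deleting exactly one digit". A minimal unmangled sketch (the function name is
# hypothetical; assumes the input has at least two digits):
def drop_one_digit_max(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1 :]) for i in range(len(digits)))

assert drop_one_digit_max(152) == 52    # candidates: 52, 12, 15
assert drop_one_digit_max(6385) == 685  # candidates: 385, 685, 635, 638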
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 347 |
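# _LazyModule defers the heavy torch/TF imports until an attribute is first accessed. A sketch
# of the same deferred-import idea via PEP 562 module __getattr__ (not transformers' actual
# implementation; submodule names taken from the imports above), meant for a package __init__.py:
import importlib

_LAZY_ATTRS = {"XLMConfig": ".configuration_xlm", "XLMModel": ".modeling_xlm"}

def __getattr__(name):
    # Resolve the attribute to its submodule only on first access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")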
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE )
# 2) Step through text searching for pattern
snake_case_ , snake_case_ = 0, 0 # index into text, pattern
while i < len(_SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(_SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case_ = failure[j - 1]
continue
i += 1
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = [0]
snake_case_ = 0
snake_case_ = 1
while j < len(_SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
snake_case_ = failure[i - 1]
continue
j += 1
failure.append(_SCREAMING_SNAKE_CASE )
return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 1 |
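# For reference, a compact standalone construction of the same KMP failure array, checked
# against the test vectors above:
def failure_array(pattern: str) -> list:
    fail = [0] * len(pattern)
    k = 0
    for j in range(1, len(pattern)):
        while k > 0 and pattern[k] != pattern[j]:
            k = fail[k - 1]  # fall back to the longest border found so far
        if pattern[k] == pattern[j]:
            k += 1
        fail[j] = k
    return fail

assert failure_array("ABABX") == [0, 0, 1, 2, 0]
assert failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]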
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a :
def __init__( self :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Optional[int]=1_3 ,__lowercase :Union[str, Any]=7 ,__lowercase :int=True ,__lowercase :Optional[int]=True ,__lowercase :Tuple=False ,__lowercase :List[str]=True ,__lowercase :Optional[Any]=9_9 ,__lowercase :str=3_2 ,__lowercase :Any=5 ,__lowercase :Optional[int]=4 ,__lowercase :Optional[int]=3_7 ,__lowercase :Optional[Any]="gelu" ,__lowercase :int=0.1 ,__lowercase :str=0.1 ,__lowercase :List[str]=5_1_2 ,__lowercase :int=1_6 ,__lowercase :Optional[int]=2 ,__lowercase :int=0.02 ,__lowercase :Any=3 ,__lowercase :List[str]=4 ,__lowercase :int=None ,):
snake_case__ : Dict = parent
snake_case__ : int = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Any = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : Tuple = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : str = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : Any = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = max_position_embeddings
snake_case__ : Dict = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : Any = initializer_range
snake_case__ : int = num_labels
snake_case__ : Optional[int] = num_choices
snake_case__ : Any = scope
def __lowerCamelCase ( self :int ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : Optional[int] = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : str = None
snake_case__ : Tuple = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :Any ,__lowercase :Optional[Any] ,__lowercase :Tuple ):
snake_case__ : Union[str, Any] = LlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[int] = model(__lowercase ,attention_mask=__lowercase )
snake_case__ : List[str] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :List[str] ,__lowercase :Union[str, Any] ,__lowercase :str ,__lowercase :Any ,__lowercase :Tuple ,__lowercase :List[Any] ,__lowercase :str ,__lowercase :Tuple ,__lowercase :Tuple ,):
snake_case__ : Any = True
snake_case__ : Any = LlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Dict = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,)
snake_case__ : Optional[Any] = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,)
snake_case__ : str = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Union[str, Any] ,__lowercase :List[str] ,__lowercase :int ,__lowercase :str ,):
snake_case__ : int = LlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :List[str] ,__lowercase :List[str] ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,):
snake_case__ : Tuple = True
snake_case__ : Optional[int] = True
snake_case__ : List[str] = LlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,use_cache=__lowercase ,)
snake_case__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
snake_case__ : Any = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case__ : str = torch.cat([input_mask, next_mask] ,dim=-1 )
snake_case__ : Any = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
snake_case__ : Dict = model(
__lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
# select random slice
snake_case__ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[str] = self.prepare_config_and_inputs()
(
snake_case__ ,
snake_case__ ,
snake_case__ ,
snake_case__ ,
snake_case__ ,
snake_case__ ,
snake_case__ ,
) = config_and_inputs
snake_case__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowerCAmelCase : Dict = (LlamaForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : Any = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self :Dict ):
snake_case__ : Dict = LlamaModelTester(self )
snake_case__ : List[str] = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :str ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :int ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : Union[str, Any] = type
self.model_tester.create_and_check_model(*__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = 3
snake_case__ : List[str] = input_dict['''input_ids''']
snake_case__ : str = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :int ):
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = 3
snake_case__ : List[Any] = '''single_label_classification'''
snake_case__ : int = input_dict['''input_ids''']
snake_case__ : List[str] = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
snake_case__ : Dict = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Optional[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = 3
snake_case__ : List[Any] = '''multi_label_classification'''
snake_case__ : Dict = input_dict['''input_ids''']
snake_case__ : str = input_ids.ne(1 ).to(__lowercase )
snake_case__ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def __lowerCamelCase ( self :List[str] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCamelCase ( self :Dict ,__lowercase :List[str] ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = ids_tensor([1, 1_0] ,config.vocab_size )
snake_case__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Union[str, Any] = LlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
snake_case__ : Union[str, Any] = original_model(__lowercase ).last_hidden_state
snake_case__ : List[Any] = original_model(__lowercase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Tuple = {'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : List[Any] = LlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
snake_case__ : List[str] = scaled_model(__lowercase ).last_hidden_state
snake_case__ : int = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) )
@require_torch
class a ( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :Tuple ):
snake_case__ : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' )
snake_case__ : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' )
snake_case__ : Any = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : Optional[int] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' )
snake_case__ : Union[str, Any] = model(torch.tensor(__lowercase ) )
# Expected mean on dim = -1
snake_case__ : str = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case__ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' )
snake_case__ : Optional[Any] = model(torch.tensor(__lowercase ) )
snake_case__ : str = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
snake_case__ : str = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
snake_case__ : Optional[Any] = '''Simply put, the theory of relativity states that '''
snake_case__ : Tuple = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
snake_case__ : Dict = tokenizer.encode(__lowercase ,return_tensors='''pt''' )
snake_case__ : str = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=__lowercase )
# greedy generation outputs
snake_case__ : List[str] = model.generate(__lowercase ,max_new_tokens=6_4 ,top_p=__lowercase ,temperature=1 ,do_sample=__lowercase )
snake_case__ : List[str] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__lowercase )
self.assertEqual(__lowercase ,__lowercase )
| 44 |
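# The parameterized scaling test above builds a `rope_scaling` dict with a `type` and a
# `factor`. A minimal config sketch of the two modes it exercises (the factor is the value
# used in the test; everything else is left at LlamaConfig defaults):
from transformers import LlamaConfig

linear_cfg = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
dynamic_cfg = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})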
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A__ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['''DPTFeatureExtractor''']
A__ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 44 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__A = logging.get_logger(__name__)
# General docstring
__A = '''ResNetConfig'''
# Base docstring
__A = '''microsoft/resnet-50'''
__A = [1, 2_0_4_8, 7, 7]
# Image classification docstring
__A = '''microsoft/resnet-50'''
__A = '''tiger cat'''
__A = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
lowercase__: List[Any] = nn.Conv2d(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , bias=False )
lowercase__: Tuple = nn.BatchNorm2d(_UpperCAmelCase )
lowercase__: int = ACT2FN[activation] if activation is not None else nn.Identity()
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = self.convolution(_UpperCAmelCase )
lowercase__: Any = self.normalization(_UpperCAmelCase )
lowercase__: Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: Union[str, Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowercase__: List[Any] = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
lowercase__: int = config.num_channels
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: int = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__: Union[str, Any] = self.embedder(_UpperCAmelCase )
lowercase__: Union[str, Any] = self.pooler(_UpperCAmelCase )
return embedding
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 ):
super().__init__()
lowercase__: Optional[int] = nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=False )
lowercase__: List[str] = nn.BatchNorm2d(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = self.convolution(_UpperCAmelCase )
lowercase__: Dict = self.normalization(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" ):
super().__init__()
lowercase__: List[Any] = in_channels != out_channels or stride != 1
lowercase__: str = (
ResNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Any = nn.Sequential(
ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , activation=_UpperCAmelCase ) , )
lowercase__: List[str] = ACT2FN[activation]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = hidden_state
lowercase__: Tuple = self.layer(_UpperCAmelCase )
lowercase__: Union[str, Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
lowercase__: List[str] = self.activation(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , _UpperCAmelCase = 4 ):
super().__init__()
lowercase__: Union[str, Any] = in_channels != out_channels or stride != 1
lowercase__: Union[str, Any] = out_channels // reduction
lowercase__: Union[str, Any] = (
ResNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowercase__: Optional[Any] = nn.Sequential(
ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) , ResNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
lowercase__: Any = ACT2FN[activation]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Dict = hidden_state
lowercase__: str = self.layer(_UpperCAmelCase )
lowercase__: List[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
lowercase__: Optional[int] = self.activation(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , ):
super().__init__()
lowercase__: Tuple = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
lowercase__: List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , activation=config.hidden_act ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: str = input
for layer in self.layers:
lowercase__: Any = layer(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: List[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase__: int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ):
lowercase__: List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__: Union[str, Any] = hidden_states + (hidden_state,)
lowercase__: int = stage_module(_UpperCAmelCase )
if output_hidden_states:
lowercase__: List[str] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase , )
class UpperCAmelCase (A__ ):
"""simple docstring"""
_UpperCAmelCase :List[str] = ResNetConfig
_UpperCAmelCase :Optional[Any] = '''resnet'''
_UpperCAmelCase :Tuple = '''pixel_values'''
_UpperCAmelCase :List[str] = True
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , nn.Conv2d ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(_UpperCAmelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = value
__A = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." ,A__ ,)
class UpperCAmelCase (A__ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
lowercase__: str = config
lowercase__: List[Any] = ResNetEmbeddings(_UpperCAmelCase )
lowercase__: Optional[Any] = ResNetEncoder(_UpperCAmelCase )
lowercase__: Dict = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
lowercase__: Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Tuple = self.embedder(_UpperCAmelCase )
lowercase__: Dict = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
lowercase__: Dict = encoder_outputs[0]
lowercase__: Optional[Any] = self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A__ ,)
class UpperCAmelCase (A__ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
lowercase__: Union[str, Any] = config.num_labels
lowercase__: Any = ResNetModel(_UpperCAmelCase )
# classification head
lowercase__: str = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
lowercase__: Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Tuple = self.resnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
lowercase__: List[Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase__: str = self.classifier(_UpperCAmelCase )
lowercase__: Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__: Dict = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__: List[str] = "single_label_classification"
else:
lowercase__: Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
lowercase__: Dict = MSELoss()
if self.num_labels == 1:
lowercase__: Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__: Any = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowercase__: List[str] = CrossEntropyLoss()
lowercase__: Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__: str = BCEWithLogitsLoss()
lowercase__: str = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
lowercase__: Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " ,A__ ,)
class UpperCAmelCase (A__ ,A__ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
super()._init_backbone(_UpperCAmelCase )
lowercase__: Union[str, Any] = [config.embedding_size] + config.hidden_sizes
lowercase__: str = ResNetEmbeddings(_UpperCAmelCase )
lowercase__: List[str] = ResNetEncoder(_UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@replace_return_docstrings(output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None ):
lowercase__: Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: int = self.embedder(_UpperCAmelCase )
lowercase__: Union[str, Any] = self.encoder(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
lowercase__: List[str] = outputs.hidden_states
lowercase__: int = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowercase__: List[str] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_UpperCAmelCase , )
| 177 |
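# ResNetBasicLayer above computes activation(layer(x) + shortcut(x)). A minimal standalone
# residual block with the same skeleton (a sketch, not the HF module):
import torch
from torch import nn

class TinyResidualBlock(nn.Module):
    def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        # 1x1 projection when the shapes change, identity otherwise.
        self.shortcut = (
            nn.Sequential(nn.Conv2d(in_ch, out_ch, 1, stride=stride, bias=False), nn.BatchNorm2d(out_ch))
            if stride != 1 or in_ch != out_ch
            else nn.Identity()
        )

    def forward(self, x):
        return torch.relu(self.body(x) + self.shortcut(x))

assert TinyResidualBlock(3, 8, stride=2)(torch.randn(1, 3, 16, 16)).shape == (1, 8, 8, 8)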
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="shi-labs/oneformer_demo" ) -> Tuple:
with open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
UpperCamelCase__ : Optional[Any] = json.load(__lowerCAmelCase )
UpperCamelCase__ : str = {}
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = []
for key, info in class_info.items():
UpperCamelCase__ : List[str] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(__lowerCAmelCase ) )
UpperCamelCase__ : Dict = thing_ids
UpperCamelCase__ : Optional[int] = class_names
return metadata
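# prepare_metadata flattens a panoptic class-info JSON into id -> name entries plus
# thing_ids/class_names lists. A sketch of that shape with a hypothetical two-class payload:
_example_class_info = {
    "0": {"name": "wall", "isthing": 0},
    "1": {"name": "person", "isthing": 1},
}
_example_metadata = {k: v["name"] for k, v in _example_class_info.items()}
_example_metadata["thing_ids"] = [int(k) for k, v in _example_class_info.items() if v["isthing"]]
_example_metadata["class_names"] = [v["name"] for v in _example_class_info.values()]
assert _example_metadata["thing_ids"] == [1]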
class __a ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=7 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Tuple=30 , SCREAMING_SNAKE_CASE : Dict=4_00 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : Optional[Any]=10 , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : int=2_55 , SCREAMING_SNAKE_CASE : str="shi-labs/oneformer_demo" , SCREAMING_SNAKE_CASE : List[Any]="ade20k_panoptic.json" , SCREAMING_SNAKE_CASE : Tuple=10 , ):
'''simple docstring'''
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Any = num_channels
UpperCamelCase__ : Optional[int] = min_resolution
UpperCamelCase__ : Union[str, Any] = max_resolution
UpperCamelCase__ : Optional[int] = do_resize
UpperCamelCase__ : List[Any] = {"shortest_edge": 32, "longest_edge": 13_33} if size is None else size
UpperCamelCase__ : Dict = do_normalize
UpperCamelCase__ : Optional[int] = image_mean
UpperCamelCase__ : Union[str, Any] = image_std
UpperCamelCase__ : Union[str, Any] = class_info_file
UpperCamelCase__ : Tuple = prepare_metadata(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = num_text
UpperCamelCase__ : int = repo_path
# for the post_process_functions
UpperCamelCase__ : int = 2
UpperCamelCase__ : str = 10
UpperCamelCase__ : Any = 10
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : List[Any] = 4
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : Tuple = do_reduce_labels
UpperCamelCase__ : List[str] = ignore_index
def __lowercase ( self : int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str]=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ : str = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ : int = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ : Any = int(self.size["shortest_edge"] * h / w )
UpperCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
elif w > h:
UpperCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
UpperCamelCase__ : int = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase__ : Optional[Any] = self.size["shortest_edge"]
UpperCamelCase__ : str = self.size["shortest_edge"]
else:
UpperCamelCase__ : Tuple = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ : List[str] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase__ : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
def __lowercase ( self : Any ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __a ( A__ , unittest.TestCase ):
_lowerCAmelCase : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowerCAmelCase : List[str] = image_processing_class
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "ignore_index" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "class_info_file" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_text" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "repo_path" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "metadata" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_reduce_labels" ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Dict = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase__ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase__ : List[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any="np" ):
'''simple docstring'''
UpperCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCamelCase__ : Any = self.image_processing_tester.num_labels
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
if with_segmentation_maps:
UpperCamelCase__ : Tuple = num_labels
if is_instance_map:
UpperCamelCase__ : List[str] = list(range(SCREAMING_SNAKE_CASE ) ) * 2
UpperCamelCase__ : Optional[Any] = dict(enumerate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Union[str, Any] = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCamelCase__ : List[str] = [Image.fromarray(SCREAMING_SNAKE_CASE ) for annotation in annotations]
UpperCamelCase__ : Optional[int] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , return_tensors="pt" , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE , )
return inputs
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
def common(SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : str=None ):
UpperCamelCase__ : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=SCREAMING_SNAKE_CASE , is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = inputs["mask_labels"]
UpperCamelCase__ : Optional[Any] = inputs["class_labels"]
UpperCamelCase__ : List[str] = inputs["pixel_values"]
UpperCamelCase__ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=SCREAMING_SNAKE_CASE )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = np.zeros((20, 50) )
UpperCamelCase__ : int = 1
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : Dict = 1
UpperCamelCase__ : int = binary_mask_to_rle(SCREAMING_SNAKE_CASE )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : int = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCamelCase__ : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCamelCase__ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCamelCase__ : Optional[Any] = feature_extractor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE , target_sizes=SCREAMING_SNAKE_CASE )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ : Optional[int] = image_processor.post_process_instance_segmentation(SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCamelCase__ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase__ : Tuple = image_processor.post_process_panoptic_segmentation(SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) | 189 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list; very short snippets are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
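# A quick illustration of the two helpers above (a sketch, not part of the
# original script): token sets are order-free, and snippets with fewer than
# MIN_NUM_TOKENS tokens hash to None.
#
#     tokens = get_tokens("def add(a, b):\n    return a + b")
#     # tokens == {"def", "add", "a", "b", "return"}
#     get_min_hash(list(tokens))  # None: only 5 tokens, below MIN_NUM_TOKENS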
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key and attach it to an existing duplicate cluster if the LSH finds one close enough."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
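# Sketch of how DuplicationIndex is typically driven (illustrative only; the
# keys are the (index, repo_name, path) tuples produced by _compute_min_hash
# below):
#
#     index = DuplicationIndex(duplication_jaccard_threshold=0.85)
#     for code_key, min_hash in minhash_iter(enumerate(dataset)):
#         index.add(code_key, min_hash)
#     clusters = index.get_duplicate_clusters()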
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
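# Worked example: jaccard_similarity("a b c", "b c d") compares {"a", "b", "c"}
# with {"b", "c", "d"}; the intersection holds 2 tokens and the union 4, so
# the similarity is 0.5.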
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    extremes_fn = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                extremes_fn,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
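# End-to-end sketch of how these pieces compose (deduplicate_dataset below is
# the driver; the dataset needs "content", "repo_name" and "path" columns and
# the dataset name here is purely illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)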
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters | 130 |
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
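# Round-trip sketch for the Ascii85 helpers above (the exact encoded bytes
# depend on the input; the invariant is that decode inverts encode):
#
#     encoded = base85_encode("some text")   # bytes in the Ascii85 alphabet
#     assert base85_decode(encoded) == "some text"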
if __name__ == "__main__":
import doctest
doctest.testmod() | 130 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Pair up a flat "--key value" argument tail into a dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
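# parse_unknown_args lets extra "--key value" flags flow through to the chosen
# subcommand, e.g. (hypothetical flags):
#
#     parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/ds"])
#     # -> {"num_proc": "4", "cache_dir": "/tmp/ds"}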
if __name__ == "__main__":
main()
| 337 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Dict = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
]) | 190 | 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class A ( __snake_case ):
__magic_name__ = ''''''
__magic_name__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
super().__init__(self , **SCREAMING_SNAKE_CASE )
A : List[Any] = repo_info
A : Any = token
A : Dict = None
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
if self.dir_cache is None:
A : Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
A : Dict = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE ): {'''name''': str(SCREAMING_SNAKE_CASE ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "rb" , **SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
if not isinstance(self.repo_info , SCREAMING_SNAKE_CASE ):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
A : Dict = hf_hub_url(self.repo_info.id , SCREAMING_SNAKE_CASE , revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE , mode=SCREAMING_SNAKE_CASE , headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
self._get_dirs()
A : List[str] = self._strip_protocol(SCREAMING_SNAKE_CASE )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
self._get_dirs()
A : Optional[Any] = PurePosixPath(path.strip('''/''' ) )
A : Optional[int] = {}
for p, f in self.dir_cache.items():
A : Dict = PurePosixPath(p.strip('''/''' ) )
A : List[str] = p.parent
if root == path:
A : Optional[Any] = f
A : Tuple = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 311 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = vocab_size
A : List[str] = hidden_size
A : List[Any] = d_kv
A : Optional[Any] = d_ff
A : Dict = num_layers
A : Dict = num_heads
A : Optional[int] = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : Dict = dropout_rate
A : Dict = layer_norm_epsilon
A : Tuple = initializer_factor
A : Union[str, Any] = use_cache
A : int = eos_token_id
A : List[str] = decoder_start_token_id
# for backwards compatibility
A : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[Any] = patch_embed_hidden_size
A : Union[str, Any] = d_ff
A : Dict = dropout_rate
A : str = num_hidden_layers
A : Dict = num_attention_heads
A : Tuple = initializer_range
A : List[str] = initializer_factor
A : Union[str, Any] = attention_dropout
A : Tuple = layer_norm_eps
A : int = dense_act_fn
A : Optional[int] = seq_len
A : Tuple = relative_attention_num_buckets
A : str = relative_attention_max_distance
A : Optional[Any] = d_kv
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text_config is None:
A : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A : str = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
A : Any = self.text_config.decoder_start_token_id
A : Any = self.text_config.pad_token_id
A : Dict = self.text_config.eos_token_id
A : Union[str, Any] = initializer_factor
A : Tuple = initializer_range
A : Optional[Any] = self.initializer_range
A : int = self.initializer_range
A : Tuple = is_vqa
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = copy.deepcopy(self.__dict__ )
A : Dict = self.text_config.to_dict()
A : int = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
| 311 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Dict = "src/transformers"
_lowercase : List[Any] = "docs/source/en"
_lowercase : Optional[Any] = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between two prompts in a file, plus its line span."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : List[Any] = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_lowercase : Dict = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Optional[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowercase : str = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Any = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
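# Example (follows from the lookahead/lookbehind pattern above):
#
#     camel_case_split("TFBertForMaskedLM") == ["TF", "Bert", "For", "Masked", "LM"]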
def _center_text(text, width):
    """Center `text` in a cell of `width` columns (✅/❌ count as two columns)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase_ : Dict = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase_ : Dict = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase_ : Union[str, Any] = collections.defaultdict(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = collections.defaultdict(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = collections.defaultdict(__SCREAMING_SNAKE_CASE )
lowercase_ : str = collections.defaultdict(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = collections.defaultdict(__SCREAMING_SNAKE_CASE )
# Let's lookup through all transformers object (once).
for attr_name in dir(__SCREAMING_SNAKE_CASE ):
lowercase_ : Any = None
if attr_name.endswith('''Tokenizer''' ):
lowercase_ : Any = slow_tokenizers
lowercase_ : List[str] = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
lowercase_ : Dict = fast_tokenizers
lowercase_ : Optional[int] = attr_name[:-13]
elif _re_tf_models.match(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Any = tf_models
lowercase_ : Any = _re_tf_models.match(__SCREAMING_SNAKE_CASE ).groups()[0]
elif _re_flax_models.match(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Union[str, Any] = flax_models
lowercase_ : Dict = _re_flax_models.match(__SCREAMING_SNAKE_CASE ).groups()[0]
elif _re_pt_models.match(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : str = pt_models
lowercase_ : Any = _re_pt_models.match(__SCREAMING_SNAKE_CASE ).groups()[0]
if lookup_dict is not None:
while len(__SCREAMING_SNAKE_CASE ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase_ : int = True
break
# Try again after removing the last word in the name
lowercase_ : Optional[int] = ''''''.join(camel_case_split(__SCREAMING_SNAKE_CASE )[:-1] )
# Let's build that table!
lowercase_ : List[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase_ : Any = [len(__SCREAMING_SNAKE_CASE ) + 2 for c in columns]
lowercase_ : Any = max([len(__SCREAMING_SNAKE_CASE ) for name in model_names] ) + 2
# Build the table per se
lowercase_ : str = '''|''' + '''|'''.join([_center_text(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c, w in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
lowercase_ : int = {True: '''✅''', False: '''❌'''}
for name in model_names:
lowercase_ : Dict = model_name_to_prefix[name]
lowercase_ : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for l, w in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] ) + "|\n"
return table
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = _find_text_in_file(
filename=os.path.join(__SCREAMING_SNAKE_CASE , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
lowercase_ : str = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__SCREAMING_SNAKE_CASE , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowercase : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running the Markov chain algorithm."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
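# Sketch: a two-state chain, counting visits over 1000 steps (outgoing
# probabilities per node should sum to 1; the values here are illustrative):
#
#     transitions = [("a", "a", 0.9), ("a", "b", 0.1),
#                    ("b", "a", 0.5), ("b", "b", 0.5)]
#     counts = get_transitions("a", transitions, 1000)  # Counter of node visits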
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCamelCase = '''true'''
def lowerCamelCase_ ( _a , _a=82 , _a=16 ):
"""simple docstring"""
set_seed(42 )
lowerCAmelCase__ : Any = RegressionModel()
lowerCAmelCase__ : List[Any] = deepcopy(_a )
lowerCAmelCase__ : List[Any] = RegressionDataset(length=_a )
lowerCAmelCase__ : Optional[int] = DataLoader(_a , batch_size=_a )
model.to(accelerator.device )
lowerCAmelCase__ : Optional[Any] = accelerator.prepare(_a , _a )
return model, ddp_model, dataloader
def lowerCamelCase_ ( _a , _a=False ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCAmelCase__ : Dict = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(_a ):
lowerCAmelCase__ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_a , max_length=_a )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ : str = dataset.map(
_a , batched=_a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCAmelCase__ : List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_a ):
if use_longest:
return tokenizer.pad(_a , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(_a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 )
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Accelerator(dispatch_batches=_a , split_batches=_a )
lowerCAmelCase__ : Optional[Any] = get_dataloader(_a , not dispatch_batches )
lowerCAmelCase__ : Any = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=_a )
lowerCAmelCase__ : Optional[Any] = accelerator.prepare(_a , _a )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Dict = []
for batch in dataloader:
lowerCAmelCase__ : Union[str, Any] = batch.values()
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(_a )
lowerCAmelCase__ : Tuple = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ : List[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(_a )
targs.append(_a )
lowerCAmelCase__ : Any = torch.cat(_a ), torch.cat(_a )
return logits, targs
def lowerCamelCase_ ( _a , _a=82 , _a=False , _a=False , _a=16 ):
"""simple docstring"""
lowerCAmelCase__ : Dict = get_basic_setup(_a , _a , _a )
lowerCAmelCase__ : Any = generate_predictions(_a , _a , _a )
assert (
len(_a ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}'
def lowerCamelCase_ ( _a = False , _a = False ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ : str = get_mrpc_setup(_a , _a )
# First do baseline
lowerCAmelCase__ : Optional[Any] = setup['''no''']
model.to(_a )
model.eval()
for batch in dataloader:
batch.to(_a )
with torch.inference_mode():
lowerCAmelCase__ : int = model(**_a )
lowerCAmelCase__ : List[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_a , references=batch['''labels'''] )
lowerCAmelCase__ : int = metric.compute()
# Then do distributed
lowerCAmelCase__ : List[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ : str = model(**_a )
lowerCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : int = batch['''labels''']
lowerCAmelCase__ : Any = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_a , references=_a )
lowerCAmelCase__ : Any = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : int = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(_a , _a )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ : Union[str, Any] = Accelerator(split_batches=_a , dispatch_batches=_a )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(_a , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCAmelCase__ : Any = Accelerator()
test_torch_metrics(_a , 512 )
accelerator.state._reset_state()
def lowerCamelCase_ ( _a ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 370 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
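# Each random base that fails to witness compositeness cuts the error
# probability by at least a factor of 4, so `prec` rounds bound the chance of
# a false "prime" verdict by 4**(-prec).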
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 211 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
a_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
a_ = '''▁'''
# Segments (not really needed)
a_ = 0
a_ = 1
a_ = 2
a_ = 3
a_ = 4
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ ="""left"""
a_ =XLNetTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , **__UpperCAmelCase , )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = 3
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = False if not self.vocab_file else True
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
lowerCAmelCase__ = TOKENIZER_CLASSES
else:
lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase__ = True
if checkpoint_name is None:
lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase__ = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
lowerCAmelCase__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 | 1 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # keep halving until the bracket is narrower than 1e-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
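# f(x) = x**3 - 2*x - 5 has a single real root near x ≈ 2.0945515; the call
# below converges to it (about 33 halvings of [1, 1000] reach the 1e-7
# tolerance).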
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 300 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
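# e.g. floats_list((2, 3)) -> a 2x3 nested list of floats drawn from [0, 1);
# `scale` rescales the range and passing `rng` makes the draw reproducible.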
@require_torch
@require_torchaudio
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Tuple=400 , lowerCAmelCase__ : int=2000 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : Any=160 , lowerCAmelCase__ : Optional[Any]=8 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : int=4000 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_: Optional[Any] = min_seq_length
SCREAMING_SNAKE_CASE_: str = max_seq_length
SCREAMING_SNAKE_CASE_: int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_: Union[str, Any] = padding_value
SCREAMING_SNAKE_CASE_: Union[str, Any] = sampling_rate
SCREAMING_SNAKE_CASE_: List[str] = return_attention_mask
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: int = feature_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = chunk_length
SCREAMING_SNAKE_CASE_: Optional[Any] = hop_length
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Dict=False):
def _flatten(lowerCAmelCase__ : int):
return list(itertools.chain(*lowerCAmelCase__))
if equal_length:
SCREAMING_SNAKE_CASE_: List[Any] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_: List[Any] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
SCREAMING_SNAKE_CASE_: Tuple = [np.asarray(lowerCAmelCase__) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : int = WhisperFeatureExtractor if is_speech_available() else None
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = WhisperFeatureExtractionTester(self)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: str = feat_extract_first.save_pretrained(lowerCAmelCase__)[0]
check_json_file_has_correct_format(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.feature_extraction_class.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE_: List[Any] = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE_: Dict = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE_: Any = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__))
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
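
    # The padding path below must not silently promote dtypes: Whisper features are
    # computed in float32, so float64 inputs should come back as float32 tensors.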
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 13 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
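

# Minimal usage sketch (hypothetical module and tensor; assumes a CUDA device for the
# quantized branch):
#
#   layer = nn.Linear(4, 4)
#   set_module_quantized_tensor_to_device(layer, "weight", "cuda:0", value=torch.randn(4, 4))
#
# For bitsandbytes parameters this re-wraps the value as Int8Params/Params4bit so the
# move to the GPU triggers quantization; plain tensors and buffers are simply moved.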
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
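

# Design note: the recursion above tracks the dotted module path in `current_key_name`,
# so entries in `modules_to_not_convert` can match either a bare child name
# ("lm_head") or a fully qualified path ("transformer.lm_head").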
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
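

# Hedged usage sketch (model and config are placeholders, not a prescribed flow):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   model = replace_with_bnb_linear(model, quantization_config=bnb_config)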
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
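

# Rationale: the names returned here (typically a tied `lm_head`) are kept in full
# precision, since quantizing tied input/output embeddings tends to degrade
# generation quality far more than quantizing interior linear layers.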
| 276 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
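
    # The components are deliberately tiny (32/64-channel blocks, a 5-layer CLIP text
    # encoder) so the fast tests run in seconds on CPU without downloading real weights.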
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
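
    # A non-zero `eta` matters here: CycleDiffusion relies on stochastic DDIM steps,
    # and `strength=0.8` leaves 80% of the diffusion trajectory available for editing.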
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 355 |
import copy
import tempfile
import unittest
from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder
def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
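

# Note: the defaults above mean "attend everywhere except padding" for the attention
# masks, while all-ones head masks keep every attention head active (no pruning).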
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
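
    # The final allclose is the cache-consistency contract: feeding only the new tokens
    # plus `past_key_values` must reproduce the full-sequence hidden states.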
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
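

# TOLERANCE bounds acceptable numerical drift across hardware/backends when comparing
# model outputs against the hard-coded expected slices in the integration tests below.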
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 50 | 0 |